// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"
static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
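
/*
 * With MODULE_PARAM_PREFIX "kcsan.", the above can be set on the kernel
 * command line, e.g. "kcsan.early_enable=0 kcsan.udelay_task=100"; the
 * 0644 parameters are also writable at runtime via
 * /sys/module/kcsan/parameters/.
 */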

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
};

/*
 * Helper macros to index into adjacent slots, starting from the address'
 * slot itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to a size that exceeds a
 *	   slot's range may check adjacent slots for a matching watchpoint.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will:
 *
 *	1. avoid excessive contention between watchpoint checks and setup;
 *	2. allow a larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
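
/*
 * For instance, with KCSAN_CHECK_ADJACENT == 1 (i.e. NUM_SLOTS == 3), the
 * slot=9 row in the table above follows from:
 *
 *	SLOT_IDX(9, 0) == 9 + ((0 + 1) % 3) == 10
 *	SLOT_IDX(9, 1) == 9 + ((1 + 1) % 3) == 11
 *	SLOT_IDX(9, 2) == 9 + ((2 + 1) % 3) ==  9
 */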

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's
 * primary slot (middle) is fine if we assume that races occur rarely. The set
 * of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use a more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Per-CPU counter of instructions to skip before setting up another
 * watchpoint; used in should_watch(). A per-CPU counter avoids excessive
 * contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, which
	 * would also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type)
{
	struct kcsan_ctx *ctx;

	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	ctx = get_ctx();
	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before the kcsan_skip check below: (1)
	 * atomics should not count towards skipped instructions, and (2)
	 * atomic_next must be decremented for each access in a consecutive
	 * instruction stream.
	 */
	if (is_atomic(ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

static inline unsigned int get_delay(void)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       prandom_u32_max(delay) :
			       0);
}
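
/*
 * Example: with CONFIG_KCSAN_DELAY_RANDOMIZE and kcsan_udelay_task == 80,
 * prandom_u32_max(80) returns a value in [0, 80), so the delay above is
 * uniformly distributed in (0, 80] microseconds and never zero; the
 * randomized skip count in reset_kcsan_skip() behaves the same way.
 */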

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory themselves, but instrumentation is
 * still emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
	else
		kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
		goto out;
	}

	if (!kcsan_interrupt_watcher)
		/* Use raw to avoid lockdep recursion via IRQ flags tracing. */
		raw_local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints' and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
		goto out_unlock;
	}

	kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
	kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase the probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay());

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For accesses with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
		if (is_assert)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
	if (!kcsan_interrupt_watcher)
		raw_local_irq_restore(irq_flags);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for zero-sized checks; this comparison will be optimized
	 * out for constant-sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else if (unlikely(should_watch(ptr, size, type)))
		kcsan_setup_watchpoint(ptr, size, type);
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable)
		WRITE_ONCE(kcsan_enabled, true);
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
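
/*
 * Usage sketch (assumed caller code; roughly what ASSERT_EXCLUSIVE_BITS()
 * in <linux/kcsan-checks.h> does): with a non-zero mask, only value
 * changes in the masked bits are reported, so concurrent updates to ~mask
 * bits are tolerated:
 *
 *	kcsan_set_access_mask(mask);
 *	__kcsan_check_access(&var, sizeof(var), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 */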

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size)
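
/*
 * For example, DEFINE_TSAN_READ_WRITE(1) defines and exports
 * __tsan_read1() and __tsan_write1() (plus their unaligned aliases),
 * which the compiler emits calls to around 1-byte plain loads and stores.
 */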

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);