// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
};

/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
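/*
 * For illustration only (not relied upon by the code): with
 * KCSAN_CHECK_ADJACENT == 1, NUM_SLOTS == 3, so for slot=9 the macro yields
 * SLOT_IDX(9, 0) = 9 + ((0+1) % 3) = 10, SLOT_IDX(9, 1) = 9 + ((1+1) % 3) = 11,
 * and SLOT_IDX(9, 2) = 9 + ((2+1) % 3) = 9, matching the example above.
 */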

/*
 * SLOT_IDX_FAST is used in the fast-path. It is fine not to check the
 * address's primary (middle) slot first, assuming that races occur rarely.
 * The set of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent
 * to {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
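/*
 * Illustration (assuming KCSAN_CHECK_ADJACENT == 1): for slot=9, SLOT_IDX
 * produces {10, 11, 9} while SLOT_IDX_FAST produces {9, 10, 11}; both cover
 * the same set of slots, which is all find_watchpoint() below needs.
 */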

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
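/*
 * Sizing illustration (not relied upon by the code): with, e.g.,
 * CONFIG_KCSAN_NUM_WATCHPOINTS == 64 and KCSAN_CHECK_ADJACENT == 1, the array
 * has 64 + 2 == 66 entries, so the largest fast-path index,
 * SLOT_IDX_FAST(63, 2) == 65, stays within bounds without a modulo.
 */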

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 * 1. another thread already consumed the watchpoint;
 * 2. the thread that set up the watchpoint already removed it;
 * 3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/*
 * Return true if watchpoint was not touched, false if consumed.
 */
static inline bool remove_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type)
{
	struct kcsan_ctx *ctx;

	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	ctx = get_ctx();
	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
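/*
 * Illustration of the CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC rule above (for
 * documentation only): a plain, naturally aligned write such as "x = 1;" for
 * an unsigned long x is treated as atomic by is_atomic(), whereas the
 * corresponding plain read of x is not, and may still be watched.
 */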

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for the consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
		(IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
			 prandom_u32_max(kcsan_skip_watch) :
			 0);
	this_cpu_write(kcsan_skip, skip_count);
}
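/*
 * Illustration (assuming, e.g., kcsan_skip_watch == 4000): without
 * CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE, roughly every 4000th candidate access per
 * CPU reaches the slow-path; with it, the per-CPU skip count is randomized to
 * a value in (0, 4000] to avoid deterministic sampling patterns.
 */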

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

static inline unsigned int get_delay(void)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
				prandom_u32_max(delay) :
				0);
}
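/*
 * Illustration (assuming, e.g., kcsan_udelay_task == 80): without
 * CONFIG_KCSAN_DELAY_RANDOMIZE the watcher stalls for the full 80 us; with it,
 * the stall is a random duration in (0, 80] us, since prandom_u32_max(80)
 * returns a value in [0, 80).
 */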

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by
	 * kcsan_is_enabled() check, as otherwise we might erroneously
	 * trigger reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
	else
		kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
		goto out;
	}

	if (!kcsan_interrupt_watcher)
		/* Use raw to avoid lockdep recursion via IRQ flags tracing. */
		raw_local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
		goto out_unlock;
	}

	kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
	kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay());

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}
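	/*
	 * Illustration of the XOR + access_mask logic above (documentation
	 * only): if the initial 2-byte value was 0x00ff, the re-read value is
	 * 0x01ff, and access_mask == 0x00ff, then expect_value._2 == 0x0100
	 * before masking and 0x0000 after, i.e. a change confined to bits
	 * outside the mask does not count as a value-change.
	 */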

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!remove_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
		if (is_assert)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
	}

	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
	if (!kcsan_interrupt_watcher)
		raw_local_irq_restore(irq_flags);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for a zero-sized check; this comparison will be optimized
	 * out for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported occur before kcsan_is_enabled() is checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else if (unlikely(should_watch(ptr, size, type)))
		kcsan_setup_watchpoint(ptr, size, type);
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable)
		WRITE_ONCE(kcsan_enabled, true);
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);
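/*
 * Usage note (illustrative, not exhaustive): the explicit checking macros in
 * include/linux/kcsan-checks.h, e.g. the ASSERT_EXCLUSIVE_* family, are
 * expected to funnel into __kcsan_check_access() with KCSAN_ACCESS_ASSERT
 * (and optionally KCSAN_ACCESS_WRITE) set in @type.
 */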

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size)
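/*
 * For illustration, DEFINE_TSAN_READ_WRITE(4) expands (roughly) to:
 *
 *	void __tsan_read4(void *ptr)  { check_access(ptr, 4, 0); }
 *	void __tsan_write4(void *ptr) { check_access(ptr, 4, KCSAN_ACCESS_WRITE); }
 *
 * plus the __tsan_unaligned_* aliases and EXPORT_SYMBOL()s for each.
 */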

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);