// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

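/*
 * The global switch for KCSAN: checked together with the per-context
 * disable_count in kcsan_is_enabled(); set from kcsan_init() if
 * CONFIG_KCSAN_EARLY_ENABLE is enabled.
 */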
bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count = 0,
	.atomic_next = 0,
	.atomic_nest_count = 0,
	.in_flat_atomic = false,
};

/*
 * Helper macros to index into adjacent slots, starting from the address's slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's
 * primary slot (middle) is fine if we assume that data races occur rarely. The
 * set of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
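 *
 * For example, with KCSAN_CHECK_ADJACENT=1 and slot=9, SLOT_IDX visits
 * {10, 11, 9} and SLOT_IDX_FAST visits {9, 10, 11}: the same set of slots,
 * only in a different order.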
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use a more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];

/*
 * Per-CPU counter of instructions left to skip before setting up the next
 * watchpoint; used in should_watch(). A per-CPU counter avoids excessive
 * contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

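/*
 * Find an existing watchpoint matching the access at addr of the given size;
 * if expect_write is true, only write watchpoints are considered, since a
 * plain read can only race with writes. Returns the matching slot and stores
 * its encoded value in *encoded_watchpoint, or returns NULL if none matches.
 */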
static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
					     bool expect_write,
					     long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

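/*
 * Try to set up a watchpoint for the access, in the primary slot for addr or
 * one of the adjacent slots. Returns the claimed slot, or NULL if all
 * candidate slots are already in use.
 */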
static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
					       bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
			      KCSAN_CHECK_ADJACENT) !=
		     ARRAY_SIZE(watchpoints) - 1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
			      KCSAN_CHECK_ADJACENT + 1) !=
		     ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val,
						    encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 * 1. another thread already consumed the watchpoint;
 * 2. the thread that set up the watchpoint already removed it;
 * 3. the watchpoint was removed and then re-used.
 */
static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
					  long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint,
					       CONSUMED_WATCHPOINT);
}

/*
 * Return true if watchpoint was not touched, false if consumed.
 */
static inline bool remove_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) !=
	       CONSUMED_WATCHPOINT;
}

static inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr() to avoid unnecessary checks that
	 * would also result in calls which generate warnings in uaccess
	 * regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

static inline bool is_atomic(const volatile void *ptr)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (unlikely(ctx->atomic_next > 0)) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}
	if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
		return true;

	return kcsan_is_atomic(ptr);
}

static inline bool should_watch(const volatile void *ptr, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * This needs to be checked first, before the kcsan_skip check below:
	 * (1) atomics should not count towards skipped instructions, and
	 * (2) atomic_next must actually be decremented for a consecutive
	 * instruction stream.
	 */
	if ((type & KCSAN_ACCESS_ATOMIC) != 0 || is_atomic(ptr))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in the slow
	 * path via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

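/*
 * Re-arm the per-CPU skip counter: the next watchpoint on this CPU is set up
 * after up to CONFIG_KCSAN_SKIP_WATCH further candidate accesses, minus a
 * random amount if CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE is enabled.
 */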
static inline void reset_kcsan_skip(void)
{
	long skip_count = CONFIG_KCSAN_SKIP_WATCH -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   prandom_u32_max(CONFIG_KCSAN_SKIP_WATCH) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

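/*
 * Microsecond delay for which kcsan_setup_watchpoint() stalls while its
 * watchpoint is armed, optionally reduced by a random amount if
 * CONFIG_KCSAN_DELAY_RANDOMIZE is enabled.
 */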
static inline unsigned int get_delay(void)
{
	unsigned int delay = in_task() ? CONFIG_KCSAN_UDELAY_TASK :
					 CONFIG_KCSAN_UDELAY_INTERRUPT;
	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
				prandom_u32_max(delay) :
				0);
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from stacktraces, as well as to give them unique names for
 * objtool's UACCESS whitelist. Each function uses user_access_save/restore(),
 * since they do not access any user memory themselves, but instrumentation is
 * still emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size, bool is_write,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;
	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports while KCSAN is disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_report(ptr, size, is_write, true, raw_smp_processor_id(),
			     KCSAN_REPORT_CONSUMED_WATCHPOINT);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
	}
	kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);

	user_access_restore(flags);
}

static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
					    size_t size, bool is_write)
{
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	bool value_change = false;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
		goto out;
	}

	/*
	 * Disable interrupts & preemptions to avoid another thread on the same
	 * CPU accessing memory locations covered by the watchpoint being set
	 * up; this is to avoid reporting races on e.g. CPU-local data.
	 *
	 * An alternative would be adding the source CPU to the watchpoint
	 * encoding, and checking that watchpoint-CPU != this-CPU. There are
	 * several problems with this:
	 * 1. we should avoid stealing more bits from the watchpoint encoding
	 *    as it would affect accuracy, as well as increase performance
	 *    overhead in the fast-path;
	 * 2. if we are preempted, but there *is* a genuine data race, we
	 *    would *not* report it -- since this is the common case (vs.
	 *    CPU-local data accesses), it makes more sense (from a data race
	 *    detection point of view) to simply disable preemptions to ensure
	 *    as many tasks as possible run on other CPUs.
	 */
	local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of `watchpoints`, and the frequency
		 * with which `should_watch()` returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
		goto out_unlock;
	}

	kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
	kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay());

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	switch (size) {
	case 1:
		value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Check if this access raced with another. */
	if (!remove_watchpoint(watchpoint)) {
		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 */
		kcsan_report(ptr, size, is_write, size > 8 || value_change,
			     smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL);
	} else if (value_change) {
		/* Inferring a race, since the value should not have changed. */
		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
			kcsan_report(ptr, size, is_write, true,
				     smp_processor_id(),
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
	}

	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
	local_irq_restore(irq_flags);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a data race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, is_write, watchpoint,
				       encoded_watchpoint);
	else if (unlikely(should_watch(ptr, type)))
		kcsan_setup_watchpoint(ptr, size, is_write);
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE))
		WRITE_ONCE(kcsan_enabled, true);
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent of each other.
	 * See the struct kcsan_ctx comments in include/linux/kcsan.h for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

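/*
 * Explicitly instrumented access check, for use from code that is not
 * compiler-instrumented; forwards to the common check_access() fast-path.
 */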
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size) \
	void __tsan_read##size(void *ptr) \
	{ \
		check_access(ptr, size, 0); \
	} \
	EXPORT_SYMBOL(__tsan_read##size); \
	void __tsan_unaligned_read##size(void *ptr) \
		__alias(__tsan_read##size); \
	EXPORT_SYMBOL(__tsan_unaligned_read##size); \
	void __tsan_write##size(void *ptr) \
	{ \
		check_access(ptr, size, KCSAN_ACCESS_WRITE); \
	} \
	EXPORT_SYMBOL(__tsan_write##size); \
	void __tsan_unaligned_write##size(void *ptr) \
		__alias(__tsan_write##size); \
	EXPORT_SYMBOL(__tsan_unaligned_write##size)

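/* Instantiate the above for 1, 2, 4, 8 and 16-byte accesses. */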
DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);