// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
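
/*
 * Example (illustrative, values made up): with MODULE_PARAM_PREFIX set to
 * "kcsan.", the parameters above can be set on the kernel command line, e.g.:
 *
 *	kcsan.early_enable=0 kcsan.skip_watch=2000
 *
 * and the writable ones can typically also be adjusted at runtime via
 * /sys/module/kcsan/parameters/, e.g.:
 *
 *	echo 50 > /sys/module/kcsan/parameters/udelay_task
 */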

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
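
/*
 * Illustrative example, assuming KCSAN_CHECK_ADJACENT == 1 (i.e. NUM_SLOTS == 3):
 * for slot=9, the indices visited are
 *
 *	SLOT_IDX(9, i),      i in [0, 3): 10, 11,  9   (primary slot last)
 *	SLOT_IDX_FAST(9, i), i in [0, 3):  9, 10, 11
 *
 * Both visit the same set {9, 10, 11}; only the order differs, which is why
 * the cheaper SLOT_IDX_FAST variant is acceptable in the fast-path.
 */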

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
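
/*
 * Example (illustrative; 'some_int' is a made-up aligned int): with
 * CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y, a plain aligned write up to word
 * size such as
 *
 *	some_int = 42;
 *
 * is treated as atomic by is_atomic() and never watched, whereas a plain read
 * of the same variable is still eligible for a watchpoint unless the context
 * was marked via kcsan_{nestable,flat}_atomic_begin() or kcsan_atomic_next().
 */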

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

static inline unsigned int get_delay(void)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
				prandom_u32_max(delay) :
				0);
}
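
/*
 * Example (illustrative): with kcsan_udelay_task == 80 and
 * CONFIG_KCSAN_DELAY_RANDOMIZE=y, prandom_u32_max(80) returns a value in
 * [0, 80), so a task-context watcher delays for somewhere in (0, 80]
 * microseconds; without randomization the delay is always 80 microseconds.
 */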

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
	else
		kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
		goto out;
	}

	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
		goto out_unlock;
	}

	kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
	kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay());

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
		if (is_assert)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable)
		WRITE_ONCE(kcsan_enabled, true);
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
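
/*
 * Example (illustrative sketch; 'flags' is a made-up shared variable, and the
 * real helpers live in include/linux/kcsan-checks.h): to assert that no other
 * thread concurrently modifies bit 0 of 'flags', while tolerating changes to
 * all other bits, a caller could do:
 *
 *	kcsan_set_access_mask(0x1);
 *	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 *
 * With a non-zero mask, kcsan_setup_watchpoint() above only reports a race if
 * the masked bits actually changed value.
 */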

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter the kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
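
/*
 * Example (illustrative; 'obj' is a made-up shared object): a scoped access is
 * re-checked by kcsan_check_scoped_accesses() on subsequent instrumented
 * accesses performed by the same context, until it is ended:
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
 *	... region in which 'obj' must not be modified concurrently ...
 *	kcsan_end_scoped_access(&sa);
 *
 * The ASSERT_EXCLUSIVE_*_SCOPED() macros in include/linux/kcsan-checks.h are
 * built on top of this interface.
 */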

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
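
/*
 * Example (illustrative): with the instrumentation enabled, a plain statement
 * such as
 *
 *	a = b;		// 'a' and 'b' are 4-byte ints
 *
 * causes the compiler to emit __tsan_read4(&b) next to the load and
 * __tsan_write4(&a) next to the store, both of which funnel into
 * check_access() above.
 */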

void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
	void __tsan_volatile_read##size(void *ptr)                             \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&           \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)     \
			return;                                                \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
		__alias(__tsan_volatile_read##size);                           \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
	void __tsan_volatile_write##size(void *ptr)                            \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&           \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)     \
			return;                                                \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_WRITE |                              \
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
		__alias(__tsan_volatile_write##size);                          \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
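
/*
 * Example (illustrative): with a compiler that emits the volatile
 * instrumentation, READ_ONCE()/WRITE_ONCE() accesses end up here, e.g.
 *
 *	x = READ_ONCE(shared);	// 'shared' is an aligned u32
 *
 * results in a call to __tsan_volatile_read4(&shared). Because the access is
 * aligned and within the size limit, it is checked as KCSAN_ACCESS_ATOMIC, or
 * skipped entirely if CONFIG_KCSAN_IGNORE_ATOMICS=y.
 */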

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);