// SPDX-License-Identifier: GPL-2.0

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
};

/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
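
/*
 * Illustration only, assuming KCSAN_CHECK_ADJACENT == 1 as in the example
 * above (so NUM_SLOTS == 3): the slot=9 row expands to
 *
 *   SLOT_IDX(9, 0) == 9 + ((0 + 1) % 3) == 10
 *   SLOT_IDX(9, 1) == 9 + ((1 + 1) % 3) == 11
 *   SLOT_IDX(9, 2) == 9 + ((2 + 1) % 3) ==  9
 *
 * i.e. the address slot itself is visited last, after its neighbours.
 */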

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's
 * primary slot (middle) is fine if we assume that races occur rarely. The set
 * of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use a more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Per-CPU counter of instructions remaining to skip before setting up another
 * watchpoint; used in should_watch(). A per-CPU counter avoids excessive
 * contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 * 1. another thread already consumed the watchpoint;
 * 2. the thread that set up the watchpoint already removed it;
 * 3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/*
 * Return true if watchpoint was not touched, false if consumed.
 */
static inline bool remove_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

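/*
 * Watchpoint life cycle implied by the helpers above: a slot transitions
 *
 *   INVALID_WATCHPOINT --insert_watchpoint()-----> encoded watchpoint
 *   encoded watchpoint --try_consume_watchpoint()-> CONSUMED_WATCHPOINT
 *   any state          --remove_watchpoint()------> INVALID_WATCHPOINT
 *
 * remove_watchpoint() returns false when another thread consumed the
 * watchpoint in the meantime, which is what kcsan_setup_watchpoint() uses to
 * detect that a concurrent access raced with the watched one.
 */
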
static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type)
{
	struct kcsan_ctx *ctx;

	if ((type & KCSAN_ACCESS_ATOMIC) != 0)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
	    IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	ctx = get_ctx();
	if (unlikely(ctx->atomic_next > 0)) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}
	if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
		return true;

	return kcsan_is_atomic(ptr);
}

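/*
 * Order of checks in is_atomic() above: an explicit KCSAN_ACCESS_ATOMIC type
 * always wins, assertion accesses are never treated as atomic, aligned plain
 * writes up to word size are atomic only with
 * CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, and only then is the per-context
 * state (atomic_next, atomic_nest_count, in_flat_atomic) consulted before
 * falling back to the address-based kcsan_is_atomic().
 */
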
static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before the kcsan_skip check below: (1)
	 * atomics should not count towards skipped instructions, and (2) to
	 * actually decrement kcsan_atomic_next for a consecutive instruction
	 * stream.
	 */
	if (is_atomic(ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in the
	 * slow-path via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = CONFIG_KCSAN_SKIP_WATCH -
		(IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
			 prandom_u32_max(CONFIG_KCSAN_SKIP_WATCH) :
			 0);
	this_cpu_write(kcsan_skip, skip_count);
}

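/*
 * Note on the randomization above: prandom_u32_max(n) returns a value in
 * [0, n), so with CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE the skip count ends up in
 * the range [1, CONFIG_KCSAN_SKIP_WATCH]; without it, the full
 * CONFIG_KCSAN_SKIP_WATCH is used every time.
 */
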
static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

static inline unsigned int get_delay(void)
{
	unsigned int delay = in_task() ? CONFIG_KCSAN_UDELAY_TASK :
					 CONFIG_KCSAN_UDELAY_INTERRUPT;
	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
				prandom_u32_max(delay) :
				0);
}

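/*
 * The returned delay is in microseconds (it is passed to udelay() below); with
 * CONFIG_KCSAN_DELAY_RANDOMIZE it falls in [1, CONFIG_KCSAN_UDELAY_*],
 * analogous to the skip-count randomization above.
 */
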
/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;
	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_report(ptr, size, type, true, raw_smp_processor_id(),
			     KCSAN_REPORT_CONSUMED_WATCHPOINT);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
	else
		kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	bool value_change = false;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
		goto out;
	}

	/*
	 * Disable interrupts & preemptions to avoid another thread on the same
	 * CPU accessing memory locations for the set up watchpoint; this is to
	 * avoid reporting races to e.g. CPU-local data.
	 *
	 * An alternative would be adding the source CPU to the watchpoint
	 * encoding, and checking that watchpoint-CPU != this-CPU. There are
	 * several problems with this:
	 * 1. we should avoid stealing more bits from the watchpoint encoding
	 *    as it would affect accuracy, as well as increase performance
	 *    overhead in the fast-path;
	 * 2. if we are preempted, but there *is* a genuine data race, we
	 *    would *not* report it -- since this is the common case (vs.
	 *    CPU-local data accesses), it makes more sense (from a data race
	 *    detection point of view) to simply disable preemptions to ensure
	 *    as many tasks as possible run on other CPUs.
	 *
	 * Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
	 */
	raw_local_irq_save(irq_flags);

369 watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
370 if (watchpoint == NULL) {
371 /*
Ingo Molnar5cbaefe2019-11-20 10:41:43 +0100372 * Out of capacity: the size of 'watchpoints', and the frequency
373 * with which should_watch() returns true should be tweaked so
Marco Elverdfd402a2019-11-14 19:02:54 +0100374 * that this case happens very rarely.
375 */
376 kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
377 goto out_unlock;
378 }
379
380 kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
381 kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
382
383 /*
384 * Read the current value, to later check and infer a race if the data
385 * was modified via a non-instrumented access, e.g. from a device.
386 */
387 switch (size) {
388 case 1:
389 expect_value._1 = READ_ONCE(*(const u8 *)ptr);
390 break;
391 case 2:
392 expect_value._2 = READ_ONCE(*(const u16 *)ptr);
393 break;
394 case 4:
395 expect_value._4 = READ_ONCE(*(const u32 *)ptr);
396 break;
397 case 8:
398 expect_value._8 = READ_ONCE(*(const u64 *)ptr);
399 break;
400 default:
401 break; /* ignore; we do not diff the values */
402 }
403
404 if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
405 kcsan_disable_current();
406 pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
407 is_write ? "write" : "read", size, ptr,
408 watchpoint_slot((unsigned long)ptr),
409 encode_watchpoint((unsigned long)ptr, size, is_write));
410 kcsan_enable_current();
411 }
412
	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay());

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	switch (size) {
	case 1:
		value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Check if this access raced with another. */
	if (!remove_watchpoint(watchpoint)) {
		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access;
		 * therefore, both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		/*
		 * - If we were not able to observe a value change due to size
		 *   constraints, always assume a value change.
		 * - If the access type is an assertion, we also assume a value
		 *   change so that the race is always reported.
		 */
		value_change = value_change || size > 8 || is_assert;

		kcsan_report(ptr, size, type, value_change, smp_processor_id(),
			     KCSAN_REPORT_RACE_SIGNAL);
	} else if (value_change) {
		/* Inferring a race, since the value should not have changed. */

		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
		if (is_assert)
			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, true,
				     smp_processor_id(),
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
	}

	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
	raw_local_irq_restore(irq_flags);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for zero-sized checks; this comparison will be optimized
	 * out for constant-sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else if (unlikely(should_watch(ptr, size, type)))
		kcsan_setup_watchpoint(ptr, size, type);
}
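
/*
 * check_access() is the single entry point for both the explicit
 * __kcsan_check_access() interface and the compiler-emitted __tsan_*() hooks
 * further below; all of them simply forward ptr, size and the access type to
 * it.
 */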

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE))
		WRITE_ONCE(kcsan_enabled, true);
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);
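
/*
 * Illustrative sketch (hypothetical caller, not part of this file): code that
 * wants KCSAN to check an access the compiler does not instrument could do
 *
 *	__kcsan_check_access(&shared_var, sizeof(shared_var), KCSAN_ACCESS_WRITE);
 *
 * where 'shared_var' is some shared variable and the type flags are those used
 * throughout this file (KCSAN_ACCESS_WRITE, KCSAN_ACCESS_ATOMIC,
 * KCSAN_ACCESS_ASSERT, or 0 for a plain read).
 */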

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

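/*
 * For reference, DEFINE_TSAN_READ_WRITE(8) above defines and exports
 * __tsan_read8()/__tsan_write8(), which forward to check_access(ptr, 8, 0) and
 * check_access(ptr, 8, KCSAN_ACCESS_WRITE) respectively, plus
 * __tsan_unaligned_read8()/__tsan_unaligned_write8() as aliases of the aligned
 * variants.
 */
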
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);