/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Rules for implicitly atomic memory accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _KERNEL_KCSAN_ATOMIC_H
#define _KERNEL_KCSAN_ATOMIC_H

#include <linux/types.h>

/*
 * Special rules for certain memory where concurrent conflicting accesses are
 * common but the current convention is to not mark them explicitly; returns
 * true if an access to @ptr should be treated as atomic. Called from the
 * slow-path.
 */
static bool kcsan_is_atomic_special(const volatile void *ptr)
{
	return false;
}
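
/*
 * For illustration only: a minimal sketch of what a special rule could look
 * like if one were ever added here, i.e. comparing @ptr against the address
 * of a specific object so that unmarked accesses to it are treated as atomic.
 * The symbol "hypothetical_busy_flag" is made up for this example and does
 * not exist in the kernel:
 *
 *	extern unsigned long hypothetical_busy_flag;
 *
 *	static bool kcsan_is_atomic_special(const volatile void *ptr)
 *	{
 *		return ptr == &hypothetical_busy_flag;
 *	}
 */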

#endif /* _KERNEL_KCSAN_ATOMIC_H */