blob: 14f72ec964922ca95cc23ec7f3dda5244a61caef [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -08002#ifndef _LINUX_KASAN_H
3#define _LINUX_KASAN_H
4
Andrey Konovalov4e35a812020-12-22 12:03:10 -08005#include <linux/static_key.h>
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -08006#include <linux/types.h>
7
8struct kmem_cache;
9struct page;
Andrey Ryabinina5af5aa2015-03-12 16:26:11 -070010struct vm_struct;
Masami Hiramatsu5be9b732017-03-16 16:40:21 -070011struct task_struct;
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080012
13#ifdef CONFIG_KASAN
14
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080015#include <linux/linkage.h>
Mike Rapoport65fddcf2020-06-08 21:32:42 -070016#include <asm/kasan.h>
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080017
/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;	/* the test expects a KASAN report to fire */
	bool report_found;	/* set once a report was actually produced */
};
23
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080024#endif
25
26#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
27
28#include <linux/pgtable.h>
29
/* Software KASAN implementations use shadow memory. */

/*
 * Initial shadow byte value: the software tag-based mode fills shadow
 * with 0xFF, the generic mode with 0.
 */
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

/* Non-zero on arches whose pte arrays carry extra hw entries — TODO confirm per-arch. */
#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

/* Early shadow: a single zero page and page-table levels shared read-only. */
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

/* Map the early shadow over [shadow_start, shadow_end). */
int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);
50
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080051static inline void *kasan_mem_to_shadow(const void *addr)
52{
53 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
54 + KASAN_SHADOW_OFFSET;
55}
56
/* Install/remove zero-shadow coverage for the [start, start + size) range. */
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
65
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080066#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
67
/* Without a software KASAN mode there is no shadow memory to manage. */
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
78
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080079#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
80
81#ifdef CONFIG_KASAN
82
/* Per-cache KASAN bookkeeping. */
struct kasan_cache {
	int alloc_meta_offset;	/* offset of allocation metadata within an object */
	int free_meta_offset;	/* offset of free metadata within an object */
	bool is_kmalloc;	/* cache belongs to the kmalloc family */
};
88
#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

/*
 * With HW_TAGS the hooks are guarded by a static key, so the
 * not-enabled case costs only a patched-out branch.
 */
static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

/* Software KASAN modes are always on once compiled in. */
static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
106
107slab_flags_t __kasan_never_merge(void);
108static __always_inline slab_flags_t kasan_never_merge(void)
109{
110 if (kasan_enabled())
111 return __kasan_never_merge();
112 return 0;
113}
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800114
115void __kasan_unpoison_range(const void *addr, size_t size);
116static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
117{
118 if (kasan_enabled())
119 __kasan_unpoison_range(addr, size);
120}
121
122void __kasan_alloc_pages(struct page *page, unsigned int order);
123static __always_inline void kasan_alloc_pages(struct page *page,
124 unsigned int order)
125{
126 if (kasan_enabled())
127 __kasan_alloc_pages(page, order);
128}
129
130void __kasan_free_pages(struct page *page, unsigned int order);
131static __always_inline void kasan_free_pages(struct page *page,
132 unsigned int order)
133{
134 if (kasan_enabled())
135 __kasan_free_pages(page, order);
136}
137
138void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
139 slab_flags_t *flags);
140static __always_inline void kasan_cache_create(struct kmem_cache *cache,
141 unsigned int *size, slab_flags_t *flags)
142{
143 if (kasan_enabled())
144 __kasan_cache_create(cache, size, flags);
145}
146
Andrey Konovalovef8fe242021-02-12 17:14:49 +1100147void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
148static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
149{
150 if (kasan_enabled())
151 __kasan_cache_create_kmalloc(cache);
152}
153
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800154size_t __kasan_metadata_size(struct kmem_cache *cache);
155static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
156{
157 if (kasan_enabled())
158 return __kasan_metadata_size(cache);
159 return 0;
160}
161
162void __kasan_poison_slab(struct page *page);
163static __always_inline void kasan_poison_slab(struct page *page)
164{
165 if (kasan_enabled())
166 __kasan_poison_slab(page);
167}
168
169void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
170static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
171 void *object)
172{
173 if (kasan_enabled())
174 __kasan_unpoison_object_data(cache, object);
175}
176
177void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
178static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
179 void *object)
180{
181 if (kasan_enabled())
182 __kasan_poison_object_data(cache, object);
183}
184
185void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
186 const void *object);
187static __always_inline void * __must_check kasan_init_slab_obj(
188 struct kmem_cache *cache, const void *object)
189{
190 if (kasan_enabled())
191 return __kasan_init_slab_obj(cache, object);
192 return (void *)object;
193}
194
195bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100196static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800197{
198 if (kasan_enabled())
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100199 return __kasan_slab_free(s, object, _RET_IP_);
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800200 return false;
201}
202
Andrey Konovalov54c022fd2021-02-12 17:14:50 +1100203void __kasan_kfree_large(void *ptr, unsigned long ip);
204static __always_inline void kasan_kfree_large(void *ptr)
205{
206 if (kasan_enabled())
207 __kasan_kfree_large(ptr, _RET_IP_);
208}
209
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800210void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100211static __always_inline void kasan_slab_free_mempool(void *ptr)
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800212{
213 if (kasan_enabled())
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100214 __kasan_slab_free_mempool(ptr, _RET_IP_);
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800215}
216
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800217void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
218 void *object, gfp_t flags);
219static __always_inline void * __must_check kasan_slab_alloc(
220 struct kmem_cache *s, void *object, gfp_t flags)
221{
222 if (kasan_enabled())
223 return __kasan_slab_alloc(s, object, flags);
224 return object;
225}
226
227void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
228 size_t size, gfp_t flags);
229static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
230 const void *object, size_t size, gfp_t flags)
231{
232 if (kasan_enabled())
233 return __kasan_kmalloc(s, object, size, flags);
234 return (void *)object;
235}
236
237void * __must_check __kasan_kmalloc_large(const void *ptr,
238 size_t size, gfp_t flags);
239static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
240 size_t size, gfp_t flags)
241{
242 if (kasan_enabled())
243 return __kasan_kmalloc_large(ptr, size, flags);
244 return (void *)ptr;
245}
246
247void * __must_check __kasan_krealloc(const void *object,
248 size_t new_size, gfp_t flags);
249static __always_inline void * __must_check kasan_krealloc(const void *object,
250 size_t new_size, gfp_t flags)
251{
252 if (kasan_enabled())
253 return __kasan_krealloc(object, new_size, flags);
254 return (void *)object;
255}
256
Andrey Konovalov696574e2021-02-03 15:35:05 +1100257/*
258 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
259 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
260 */
261bool __kasan_check_byte(const void *addr, unsigned long ip);
262static __always_inline bool kasan_check_byte(const void *addr)
263{
264 if (kasan_enabled())
265 return __kasan_check_byte(addr, _RET_IP_);
266 return true;
267}
268
269
Mark Rutlandb0845ce2017-03-31 15:12:04 -0700270bool kasan_save_enable_multi_shot(void);
271void kasan_restore_multi_shot(bool enabled);
272
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800273#else /* CONFIG_KASAN */
274
/* CONFIG_KASAN is off: every hook degenerates to a no-op stub. */
static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
/* Pass-through stubs: the object/pointer argument is returned unchanged. */
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
/* With KASAN off, every byte is considered accessible. */
static inline bool kasan_check_byte(const void *address)
{
	return true;
}
Andrey Ryabinin9b75a862016-06-24 14:49:34 -0700330
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800331#endif /* CONFIG_KASAN */
332
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
/* Unpoison a task's stack shadow; stub without stack instrumentation. */
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
338
#ifdef CONFIG_KASAN_GENERIC

/* Cache lifecycle hooks, generic mode only. */
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
/* Record an auxiliary stack trace for the object at ptr. */
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
352
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/* Strip the KASAN tag from a pointer (delegates to the arch helper). */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

/* No pointer tags in this mode: return the pointer unchanged. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
378
#ifdef CONFIG_KASAN_SW_TAGS
/* One-time boot initialization of the software tag-based mode. */
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
/* Per-CPU and one-time boot initialization of the hardware tag-based mode. */
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
Andrey Konovalov080eb832018-12-28 00:30:09 -0800392
#ifdef CONFIG_KASAN_VMALLOC

/* Shadow management for vmalloc regions. */
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
/* Release shadow backing a freed region bounded by the free-region args. */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			unsigned long free_region_start,
			unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */
420
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

/* Stubs: with vmalloc-backed shadow (or no sw KASAN) no special case is needed. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
Daniel Axtens3c5c3cf2019-11-30 17:54:50 -0800438
#ifdef CONFIG_KASAN_INLINE
/*
 * Hook for non-canonical addresses — presumably invoked from fault
 * handling when inline instrumentation faults; verify at call sites.
 */
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
444
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800445#endif /* LINUX_KASAN_H */