blob: 30ff13780bdecf51470ba39837ab28269dccc149 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -08002#ifndef _LINUX_KASAN_H
3#define _LINUX_KASAN_H
4
Andrey Konovalov4e35a812020-12-22 12:03:10 -08005#include <linux/static_key.h>
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -08006#include <linux/types.h>
7
8struct kmem_cache;
9struct page;
Andrey Ryabinina5af5aa2015-03-12 16:26:11 -070010struct vm_struct;
Masami Hiramatsu5be9b732017-03-16 16:40:21 -070011struct task_struct;
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080012
13#ifdef CONFIG_KASAN
14
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080015#include <linux/linkage.h>
Mike Rapoport65fddcf2020-06-08 21:32:42 -070016#include <asm/kasan.h>
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080017
/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;	/* test expects a KASAN report to be produced */
	bool report_found;	/* set once a KASAN report was actually produced */
};
23
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080024#endif
25
26#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
27
28#include <linux/pgtable.h>
29
/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* Initial shadow value: 0xFF for the software tag-based mode, 0 otherwise. */
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

/*
 * Early shadow page and page-table entries, used to back shadow
 * regions before/instead of real per-range shadow memory.
 */
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

/* Map the early shadow over [shadow_start, shadow_end). */
int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);
46
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -080047static inline void *kasan_mem_to_shadow(const void *addr)
48{
49 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
50 + KASAN_SHADOW_OFFSET;
51}
52
/* Install/remove zero shadow memory for the given address range. */
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
61
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080062#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
63
/* No software KASAN mode: there is no shadow memory to manage. */
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}

static inline void kasan_remove_zero_shadow(void *start, unsigned long size)
{
}

/* Likewise, per-task report enable/disable becomes a no-op. */
static inline void kasan_enable_current(void)
{
}

static inline void kasan_disable_current(void)
{
}
74
Andrey Konovalovf4fb1152020-12-22 12:00:17 -080075#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
76
77#ifdef CONFIG_KASAN
78
/* Per-cache KASAN slab parameters. */
struct kasan_cache {
	int alloc_meta_offset;	/* offset of allocation metadata in the object */
	int free_meta_offset;	/* offset of free metadata in the object */
	bool is_kmalloc;	/* cache belongs to the kmalloc family */
};
84
#ifdef CONFIG_KASAN_HW_TAGS

/*
 * The hardware tag-based mode is gated on a runtime-patchable static key,
 * so KASAN can be switched off without recompiling.
 */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

/* Software modes are unconditionally on once compiled in. */
static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
102
103slab_flags_t __kasan_never_merge(void);
104static __always_inline slab_flags_t kasan_never_merge(void)
105{
106 if (kasan_enabled())
107 return __kasan_never_merge();
108 return 0;
109}
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800110
111void __kasan_unpoison_range(const void *addr, size_t size);
112static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
113{
114 if (kasan_enabled())
115 __kasan_unpoison_range(addr, size);
116}
117
118void __kasan_alloc_pages(struct page *page, unsigned int order);
119static __always_inline void kasan_alloc_pages(struct page *page,
120 unsigned int order)
121{
122 if (kasan_enabled())
123 __kasan_alloc_pages(page, order);
124}
125
126void __kasan_free_pages(struct page *page, unsigned int order);
127static __always_inline void kasan_free_pages(struct page *page,
128 unsigned int order)
129{
130 if (kasan_enabled())
131 __kasan_free_pages(page, order);
132}
133
134void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
135 slab_flags_t *flags);
136static __always_inline void kasan_cache_create(struct kmem_cache *cache,
137 unsigned int *size, slab_flags_t *flags)
138{
139 if (kasan_enabled())
140 __kasan_cache_create(cache, size, flags);
141}
142
Andrey Konovalovef8fe242021-02-12 17:14:49 +1100143void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
144static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
145{
146 if (kasan_enabled())
147 __kasan_cache_create_kmalloc(cache);
148}
149
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800150size_t __kasan_metadata_size(struct kmem_cache *cache);
151static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
152{
153 if (kasan_enabled())
154 return __kasan_metadata_size(cache);
155 return 0;
156}
157
158void __kasan_poison_slab(struct page *page);
159static __always_inline void kasan_poison_slab(struct page *page)
160{
161 if (kasan_enabled())
162 __kasan_poison_slab(page);
163}
164
165void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
166static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
167 void *object)
168{
169 if (kasan_enabled())
170 __kasan_unpoison_object_data(cache, object);
171}
172
173void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
174static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
175 void *object)
176{
177 if (kasan_enabled())
178 __kasan_poison_object_data(cache, object);
179}
180
181void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
182 const void *object);
183static __always_inline void * __must_check kasan_init_slab_obj(
184 struct kmem_cache *cache, const void *object)
185{
186 if (kasan_enabled())
187 return __kasan_init_slab_obj(cache, object);
188 return (void *)object;
189}
190
191bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100192static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800193{
194 if (kasan_enabled())
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100195 return __kasan_slab_free(s, object, _RET_IP_);
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800196 return false;
197}
198
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800199void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100200static __always_inline void kasan_slab_free_mempool(void *ptr)
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800201{
202 if (kasan_enabled())
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100203 __kasan_slab_free_mempool(ptr, _RET_IP_);
Andrey Konovalov1d986f32020-12-22 12:03:13 -0800204}
205
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800206void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
207 void *object, gfp_t flags);
208static __always_inline void * __must_check kasan_slab_alloc(
209 struct kmem_cache *s, void *object, gfp_t flags)
210{
211 if (kasan_enabled())
212 return __kasan_slab_alloc(s, object, flags);
213 return object;
214}
215
216void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
217 size_t size, gfp_t flags);
218static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
219 const void *object, size_t size, gfp_t flags)
220{
221 if (kasan_enabled())
222 return __kasan_kmalloc(s, object, size, flags);
223 return (void *)object;
224}
225
226void * __must_check __kasan_kmalloc_large(const void *ptr,
227 size_t size, gfp_t flags);
228static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
229 size_t size, gfp_t flags)
230{
231 if (kasan_enabled())
232 return __kasan_kmalloc_large(ptr, size, flags);
233 return (void *)ptr;
234}
235
236void * __must_check __kasan_krealloc(const void *object,
237 size_t new_size, gfp_t flags);
238static __always_inline void * __must_check kasan_krealloc(const void *object,
239 size_t new_size, gfp_t flags)
240{
241 if (kasan_enabled())
242 return __kasan_krealloc(object, new_size, flags);
243 return (void *)object;
244}
245
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800246void __kasan_kfree_large(void *ptr, unsigned long ip);
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100247static __always_inline void kasan_kfree_large(void *ptr)
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800248{
249 if (kasan_enabled())
Andrey Konovalov3cd65f52021-02-03 15:35:05 +1100250 __kasan_kfree_large(ptr, _RET_IP_);
Andrey Konovalov4e35a812020-12-22 12:03:10 -0800251}
Andrey Ryabinin9b75a862016-06-24 14:49:34 -0700252
Andrey Konovalov696574e2021-02-03 15:35:05 +1100253/*
254 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
255 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
256 */
257bool __kasan_check_byte(const void *addr, unsigned long ip);
258static __always_inline bool kasan_check_byte(const void *addr)
259{
260 if (kasan_enabled())
261 return __kasan_check_byte(addr, _RET_IP_);
262 return true;
263}
264
265
Mark Rutlandb0845ce2017-03-31 15:12:04 -0700266bool kasan_save_enable_multi_shot(void);
267void kasan_restore_multi_shot(bool enabled);
268
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800269#else /* CONFIG_KASAN */
270
/*
 * CONFIG_KASAN=n: every hook compiles to a no-op, and the allocation
 * helpers pass their pointer argument through unchanged.
 */
static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}
Andrey Ryabinin9b75a862016-06-24 14:49:34 -0700326
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800327#endif /* CONFIG_KASAN */
328
/* Task-stack unpoisoning is only provided when CONFIG_KASAN_STACK is set. */
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
334
/* Hooks provided only by the generic (CONFIG_KASAN_GENERIC) mode. */
#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
/* Record an auxiliary stack trace for the object at @ptr. */
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
348
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/* Strip the KASAN tag from a tagged pointer (arch-provided helper). */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

/* Without a tag-based mode, pointers carry no tag: return @addr as-is. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
374
/* One-time initialization entry points for the tag-based modes. */
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
/* Hardware tag-based mode needs both per-CPU and global init. */
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
Andrey Konovalov080eb832018-12-28 00:30:09 -0800388
/* Shadow management for vmalloc'ed regions (CONFIG_KASAN_VMALLOC). */
#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */
416
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

/* No-op stubs: the vmalloc-based shadow (or no KASAN) covers modules. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
Daniel Axtens3c5c3cf2019-11-30 17:54:50 -0800434
/* Hook for faults on non-canonical addresses; only used with inline
 * instrumentation (CONFIG_KASAN_INLINE). */
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
440
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -0800441#endif /* LINUX_KASAN_H */