/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif
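
/*
 * Background, as a sketch rather than part of this header's contract:
 * in the generic mode, each shadow byte tracks an 8-byte granule of
 * memory; 0 means all 8 bytes are accessible, 1..7 that only the first
 * N bytes are, and negative values mark redzones and freed objects.
 * The software tag-based mode instead stores a per-granule memory tag
 * in the shadow, which is why shadow is initialized to
 * KASAN_TAG_INVALID (0xFE), marking memory as untagged, rather than
 * to 0.
 */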

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
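
/*
 * A worked example of the mapping above, assuming the generic mode's
 * KASAN_SHADOW_SCALE_SHIFT of 3 (one shadow byte per 8 bytes of memory)
 * and a hypothetical address:
 *
 *	addr   = 0xffff800012345678
 *	shadow = (0xffff800012345678 >> 3) + KASAN_SHADOW_OFFSET
 *
 * Consecutive 8-byte granules therefore map to consecutive shadow bytes.
 */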

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
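
/*
 * A minimal usage sketch (hypothetical caller): wrap code that must
 * legitimately touch poisoned memory in a balanced pair; the calls
 * nest, as they adjust a per-task depth counter rather than a flag:
 *
 *	kasan_disable_current();
 *	touch_poisoned_object(p);
 *	kasan_enable_current();
 */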

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}
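
/*
 * kasan_flag_enabled is a static key: it is flipped once during early
 * boot (only when the hardware actually supports memory tagging and
 * KASAN was not disabled on the command line), after which
 * kasan_enabled() compiles down to a patched jump rather than a load,
 * making the disabled case nearly free.
 */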

static inline bool kasan_hw_tags_enabled(void)
{
	return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);
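
/*
 * With hardware tag-based KASAN these combine page initialization with
 * tag assignment ("integrated init"): on arm64 MTE, zeroing and tagging
 * a granule can be done by the same instruction, so doing both at once
 * avoids a second pass over the page. (A summary of the design intent,
 * not of any one architecture's implementation.)
 */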

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}
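
/*
 * The shape above repeats for the rest of this section: each __kasan_*
 * function does the real work out of line, and its kasan_*() wrapper
 * guards the call with kasan_enabled(). Under CONFIG_KASAN_HW_TAGS that
 * guard is the boot-time static key, so a KASAN-built kernel running on
 * hardware without tagging support pays only a patched-out branch per
 * hook.
 */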

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
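
/*
 * A sketch of the caller contract, assuming the slab free path: a true
 * return means KASAN has taken ownership of the object (e.g. the
 * generic mode queued it in its quarantine to catch later
 * use-after-free), so the allocator must not put it on the free list
 * yet:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// quarantined, not freed
 */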

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
					void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}
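
/*
 * Note the __must_check: the tag-based modes may return a pointer with
 * a different tag than the one passed in, and a (hypothetical) caller
 * has to adopt the returned value:
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *
 * Keeping the stale, pre-retag pointer would trip the tag check on the
 * next access under the hardware mode.
 */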

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
					size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
					size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
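
/*
 * A usage sketch: helpers such as ksize() probe the first byte of a
 * caller-supplied pointer before trusting it, bailing out (and letting
 * KASAN print a report) when the object is not accessible:
 *
 *	if (!kasan_check_byte(objp))
 *		return 0;
 */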

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
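
/*
 * A worked example, assuming the software tag-based mode on arm64,
 * where the tag lives in the pointer's top byte (courtesy of Top Byte
 * Ignore):
 *
 *	tagged   = 0x34ffff8000abc000;		// tag 0x34
 *	untagged = kasan_reset_tag(tagged);
 *	// untagged == 0xffffff8000abc000, canonical top byte restored
 *
 * Use this wherever pointers are compared or handed to code that
 * expects canonical addresses.
 */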

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
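
/*
 * For orientation, a sketch of the call chain rather than an API
 * guarantee: this is the common sink that the mode-specific check paths
 * funnel into, e.g. the compiler-emitted __asan_load*()/__asan_store*()
 * hooks of the generic mode or the tag fault handler of the hardware
 * mode.
 */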

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
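
/*
 * A sketch of the lifecycle as the vmalloc core drives it: allocating a
 * region populates real shadow for it (kasan_populate_vmalloc), mapping
 * and unmapping toggle its accessibility (kasan_unpoison_vmalloc and
 * kasan_poison_vmalloc), and once a whole free region has coalesced,
 * its shadow pages can be returned to the system
 * (kasan_release_vmalloc).
 */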

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
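
/*
 * A usage sketch, assuming an architecture's module_alloc(): vmalloc
 * the module area, back it with real shadow, and unwind on failure:
 *
 *	p = __vmalloc_node_range(...);
 *	if (p && kasan_module_alloc(p, size) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 */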

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */