// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

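/* Fill in a KASAN track with the current task's pid and saved stack trace. */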
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
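/*
 * For example (derived from the table above): a 96-byte object gets a 32-byte
 * redzone (96 <= 128 - 32), while a 128-byte object already needs the 64-byte
 * one (128 > 128 - 32 but 128 <= 512 - 64).
 */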

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
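/*
 * Rough resulting layout for the generic mode (a sketch, not exact: offsets
 * are relative to the object start and other debug metadata may sit in
 * between):
 *
 *   | object data | ... | kasan_alloc_meta | kasan_free_meta | redzone |
 *                       ^ alloc_meta_offset ^ free_meta_offset
 *
 * Either meta is dropped when it would push the size past KMALLOC_MAX_SIZE;
 * free_meta_offset == 0 means free meta is stored in the freed object itself.
 */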

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

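/*
 * Roughly: returns true when KASAN takes over the free, i.e. when an invalid
 * free has been reported or the object has been put into quarantine; the
 * caller should then skip freeing the object. Returns false when the caller
 * should proceed with the actual free.
 */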
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_FREE, init);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}
438
Andrey Konovalove2db1a92021-02-25 17:19:59 -0800439void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
Andrey Konovalovda844b72021-04-29 23:00:06 -0700440 void *object, gfp_t flags, bool init)
Andrey Konovalove2db1a92021-02-25 17:19:59 -0800441{
442 u8 tag;
443 void *tagged_object;
444
445 if (gfpflags_allow_blocking(flags))
446 kasan_quarantine_reduce();
447
448 if (unlikely(object == NULL))
449 return NULL;
450
451 if (is_kfence_address(object))
452 return (void *)object;
453
454 /*
455 * Generate and assign random tag for tag-based modes.
456 * Tag is ignored in set_tag() for the generic mode.
457 */
458 tag = assign_tag(cache, object, false);
459 tagged_object = set_tag(object, tag);
460
461 /*
462 * Unpoison the whole object.
463 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
464 */
Andrey Konovalovda844b72021-04-29 23:00:06 -0700465 kasan_unpoison(tagged_object, cache->object_size, init);
Andrey Konovalove2db1a92021-02-25 17:19:59 -0800466
467 /* Save alloc info (if possible) for non-kmalloc() allocations. */
468 if (kasan_stack_collection_enabled())
469 set_alloc_info(cache, (void *)object, flags, false);
470
471 return tagged_object;
472}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
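/*
 * For example (assuming the generic mode's 8-byte granule): for kmalloc(10)
 * served from a 32-byte cache, kasan_poison_last_granule() marks bytes 10-15
 * of the second granule inaccessible, and the aligned redzone covers bytes
 * 16-31 (redzone_start = object + 16, redzone_end = object + 32).
 */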

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}