// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

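/*
 * Capture the current call stack (with IRQ stack frames filtered out) and
 * store it deduplicated in the stack depot; the returned compact handle is
 * what gets recorded in the allocation/free tracks instead of a full trace.
 */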
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

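/*
 * Record the current task's pid and a stack depot handle in @track; used to
 * fill in both the allocation and the free track of an object's metadata.
 */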
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
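/*
 * kasan_enable_current()/kasan_disable_current() adjust the per-task
 * kasan_depth counter; callers use the pair to temporarily suppress KASAN
 * reports around accesses that are known to trip the sanitizer (see the
 * declarations in include/linux/kasan.h).
 */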
void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

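/*
 * The __kasan_*() functions below are the out-of-line implementations behind
 * the kasan_*() hooks declared in include/linux/kasan.h; the inline wrappers
 * there check kasan_enabled() before calling into this file.
 */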
void __kasan_unpoison_range(const void *address, size_t size)
{
        kasan_unpoison(address, size);
}

#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        void *base = task_stack_page(task);

        kasan_unpoison(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
        if (kasan_stack_collection_enabled())
                return SLAB_KASAN;
        return 0;
}

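/*
 * Page allocator hook: give every page of the allocation the same
 * (pseudo)random tag and unpoison the whole range. With generic KASAN the
 * tag is effectively a no-op and this reduces to unpoisoning.
 */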
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = kasan_random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison(page_address(page), PAGE_SIZE << order);
}

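/* Poison freed pages so that any later access is reported as an access to freed memory. */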
void __kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison(page_address(page), PAGE_SIZE << order,
                             KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer runtime:
 * larger allocations get larger redzones. For example, a 128-byte object gets
 * a 64-byte redzone and a 2000-byte object gets a 128-byte one.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

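/*
 * Called while a slab cache is being created: reserve space for alloc/free
 * metadata inside the redzone (when stack collection is on) and grow *size so
 * that each object still ends up with a reasonably sized redzone.
 */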
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                          slab_flags_t *flags)
{
        unsigned int ok_size;
        unsigned int optimal_size;

        /*
         * SLAB_KASAN is used to mark caches as ones that are sanitized by
         * KASAN. Currently this flag is used in two places:
         * 1. In slab_ksize() when calculating the size of the accessible
         *    memory within the object.
         * 2. In slab_common.c to prevent merging of sanitized caches.
         */
        *flags |= SLAB_KASAN;

        if (!kasan_stack_collection_enabled())
                return;

        ok_size = *size;

        /* Add alloc meta into redzone. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /*
         * If alloc meta doesn't fit, don't add it.
         * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
         * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
         * larger sizes.
         */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
                /* Continue, since free meta might still fit. */
        }

        /* Only the generic mode uses free meta or flexible redzones. */
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
                cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                return;
        }

        /*
         * Add free meta into redzone when it's not possible to store
         * it in the object. This is the case when:
         * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
         *    be touched after it was freed, or
         * 2. Object has a constructor, which means it's expected to
         *    retain its content until the next allocation, or
         * 3. Object is too small.
         * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
         */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                ok_size = *size;

                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);

                /* If free meta doesn't fit, don't add it. */
                if (*size > KMALLOC_MAX_SIZE) {
                        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                        *size = ok_size;
                }
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);
        /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
        if (optimal_size > KMALLOC_MAX_SIZE)
                optimal_size = KMALLOC_MAX_SIZE;
        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}

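/* Size of the redzone metadata that __kasan_cache_create() reserved, if any. */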
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
        if (!kasan_stack_collection_enabled())
                return 0;
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

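/*
 * The three hooks below are called from the slab allocator while a new slab
 * page is set up: roughly speaking, the whole page is poisoned first, and
 * individual objects are then unpoisoned around running their constructor
 * and poisoned again until they are actually allocated.
 */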
void __kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison(page_address(page), page_size(page),
                     KASAN_KMALLOC_REDZONE);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison(object, cache->object_size);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison(object, cache->object_size, KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly, since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                     bool init, bool keep_tag)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0xff;

        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : kasan_random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        if (kasan_stack_collection_enabled()) {
                alloc_meta = kasan_get_alloc_meta(cache, object);
                if (alloc_meta)
                        __memset(alloc_meta, 0, sizeof(*alloc_meta));
        }

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
        object = set_tag(object, assign_tag(cache, object, true, false));

        return (void *)object;
}

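/*
 * Common slab free hook. Returns true when the slab allocator must not free
 * the object itself: either the free was invalid (and has been reported), or
 * the object was placed into the KASAN quarantine for delayed reuse.
 */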
static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool quarantine)
{
        u8 tag;
        void *tagged_object;

        tag = get_tag(object);
        tagged_object = object;
        object = kasan_reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        if (!kasan_byte_accessible(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        kasan_poison(object, cache->object_size, KASAN_KMALLOC_FREE);

        if (!kasan_stack_collection_enabled())
                return false;

        if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
                return false;

        kasan_set_free_info(cache, object, tag);

        return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return ____kasan_slab_free(cache, object, ip, true);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        /*
         * Even though this function is only called for kmem_cache_alloc and
         * kmalloc backed mempool allocations, those allocations can still be
         * !PageSlab() when the size provided to kmalloc is larger than
         * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
         */
        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                ____kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta)
                kasan_set_track(&alloc_meta->alloc_track, flags);
}

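/*
 * Common allocation hook for kasan_slab_alloc() and kasan_kmalloc(): unpoison
 * the requested size, poison the rest of the object as a redzone, pick or
 * keep a tag for the returned pointer, and record the allocation stack.
 */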
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
                               size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                               KASAN_GRANULE_SIZE);
        tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
        kasan_unpoison(set_tag(object, tag), size);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_KMALLOC_REDZONE);

        if (kasan_stack_collection_enabled())
                set_alloc_info(cache, (void *)object, flags);

        return set_tag(object, tag);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                       void *object, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                    size_t size, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(__kasan_kmalloc);

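/*
 * Hook for kmalloc() allocations that were served directly by the page
 * allocator: unpoison the requested size and poison the remainder of the
 * allocated pages as a redzone.
 */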
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                          gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        kasan_unpoison(ptr, size);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return __kasan_kmalloc_large(object, size, flags);
        else
                return ____kasan_kmalloc(page->slab_cache, object, size,
                                         flags, true);
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by kasan_free_pages(). */
}

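/*
 * Check that a single byte is accessible and report otherwise; this backs
 * kasan_check_byte(), which ksize(), for example, uses to validate its
 * argument before the whole allocation is unpoisoned.
 */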
bool __kasan_check_byte(const void *address, unsigned long ip)
{
        if (!kasan_byte_accessible(address)) {
                kasan_report((unsigned long)address, 1, false, ip);
                return false;
        }
        return true;
}