// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

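/*
 * Capture the current task's stack trace and store it in the stack depot,
 * returning a handle to the saved trace. can_alloc tells the depot whether
 * it may allocate memory for a new stack record in this context.
 */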
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

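/* Record the current task's pid and a stack trace handle in *track. */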
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

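/*
 * kasan_disable_current() and kasan_enable_current() are meant to be used
 * in balanced pairs to temporarily suppress KASAN error reporting for the
 * current task (nesting is tracked via current->kasan_depth), e.g.:
 *
 *	kasan_disable_current();
 *	...code that legitimately accesses poisoned memory...
 *	kasan_enable_current();
 */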
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

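/* Mark [address, address + size) as accessible. */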
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

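/*
 * On page allocation: assign one random tag to all pages of the high-order
 * allocation and unpoison their contents. Highmem pages are skipped, as
 * KASAN does not cover highmem.
 */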
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime:
 * the larger the allocation, the larger the redzone. For example, a 48-byte
 * object fits the 64-byte bucket and gets a 16-byte redzone, while a
 * 1000-byte object gets a 128-byte one.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16   ? 16 :
		object_size <= 128 - 32  ? 32 :
		object_size <= 512 - 64  ? 64 :
		object_size <= 4096 - 128   ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

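/*
 * Return the number of bytes of KASAN metadata (alloc and free meta) stored
 * in the object's redzone; 0 when stack collection is disabled.
 */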
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

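/*
 * On slab page allocation: reset the per-page KASAN tags and poison the
 * whole page as redzone; individual objects are unpoisoned as they are
 * handed out by the allocator.
 */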
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on objects' indexes, so that objects that are next
 *    to each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS. */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

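/*
 * Returns true if the object must not be passed on to the actual slab free
 * path: either the free was found to be invalid (and has been reported), or
 * the object has been queued into the KASAN quarantine instead.
 */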
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

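/*
 * Sanity checks for freeing a page_alloc-backed allocation: report an
 * invalid free if ptr does not point to the start of the (head) page or if
 * the memory is already inaccessible. Returns true if a bug was detected.
 */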
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

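/*
 * Record the allocation stack trace in the object's alloc metadata, if the
 * cache has any. For kmalloc caches this is only done from kasan_kmalloc(),
 * once the exact requested size is known.
 */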
static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

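/*
 * Precisely poison a kmalloc'ed object: the bytes past the requested size
 * and up to the end of the cache's object area become redzone. In generic
 * mode the last granule is partially poisoned for byte-level precision.
 */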
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

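/*
 * Called by krealloc(): unpoison the object's data up to the new size and
 * re-poison the redzone via the kmalloc() or kmalloc_large() path, depending
 * on whether the allocation is slab- or page_alloc-backed.
 */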
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

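/*
 * Check whether the byte at *address is accessible; if not, print a KASAN
 * report for a 1-byte read at the given ip and return false.
 */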
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}