// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

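/*
 * kasan_save_stack() compresses the current call trace into a stack depot
 * handle that is cheap to keep in object metadata; filter_irq_stacks()
 * truncates the trace at the interrupt entry so traces taken from IRQ
 * context deduplicate better. kasan_set_track() below records the (pid,
 * stack handle) pair that reports later print. A sketch of how such a
 * handle is resolved (illustrative, not part of this file):
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries = stack_depot_fetch(handle, &entries);
 *
 *	stack_trace_print(entries, nr_entries, 0);
 */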
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

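/*
 * kasan_depth is a per-task counter; KASAN reports are suppressed while it
 * is non-zero (the counter is unsigned, so a disable from zero wraps to a
 * non-zero value). The expected usage (illustrative) is to bracket code
 * that legitimately touches poisoned memory:
 *
 *	kasan_disable_current();
 *	...access memory that may be poisoned...
 *	kasan_enable_current();
 */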
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

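/*
 * The tag-based modes give every page of an allocation the same random tag,
 * so a pointer derived from any page of the block validates against all of
 * it; e.g. an order-2 allocation tags 4 struct pages and unpoisons
 * PAGE_SIZE << 2 bytes. Highmem pages are skipped, as KASAN does not cover
 * them.
 */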
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, false);
}

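/*
 * Freed pages are poisoned as KASAN_FREE_PAGE, so accesses to them are
 * reported as use-after-free until the pages are reallocated.
 */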
void __kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, false);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
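 * E.g. a 128-byte object overflows the "128 - 32" bucket (96 bytes) and
 * therefore falls into the "512 - 64" bucket, getting a 64-byte redzone.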
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
	       (cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

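/*
 * The accessors below return a pointer into the object's redzone at the
 * offset recorded by __kasan_cache_create(), or NULL when the cache carries
 * no such metadata.
 */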
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

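/*
 * A fresh slab page has its page tags reset and is poisoned wholesale as
 * KASAN_KMALLOC_REDZONE; objects are then unpoisoned individually as they
 * are handed out, so everything in between remains a redzone.
 */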
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

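/*
 * Common free path. Returns true when the caller must not actually release
 * the object: either the free was invalid (and has been reported), or the
 * object was placed into the generic mode's quarantine instead of being
 * freed immediately.
 */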
static inline bool ____kasan_slab_free(struct kmem_cache *cache,
				void *object, unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_FREE, false);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return ____kasan_slab_free(cache, object, ip, true);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

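/*
 * Common allocation hook, called from the slab allocator's post-allocation
 * path. The returned pointer carries the KASAN tag and is the one that must
 * be handed back to the caller of kmem_cache_alloc()/kmalloc().
 */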
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, false);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

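/*
 * kmalloc's precise poisoning narrows the accessible area to the requested
 * size. A rough sketch of the resulting in-slab layout (illustrative):
 *
 *	| size bytes, accessible | granule tail | aligned redzone | meta |
 *
 * The granule tail is poisoned byte-precisely only in the generic mode; the
 * tag-based modes work at KASAN_GRANULE_SIZE granularity.
 */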
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

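/*
 * Explicit single-byte accessibility check; ksize(), for instance, uses it
 * to reject pointers to freed or otherwise inaccessible objects before
 * unpoisoning the full allocation.
 */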
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}