// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

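/*
 * save_stack() captures the current call stack into the stack depot and
 * returns a compact handle; filter_irq_stacks() trims entries that belong
 * to interrupt handling so unrelated IRQ frames don't end up in reports.
 * set_track() records that handle together with the current pid, which is
 * how the alloc/free stacks later shown in KASAN reports are produced.
 */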
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

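/*
 * The definitions below replace the architecture's memset/memmove/memcpy
 * so that every bulk memory operation is validated: the source is checked
 * as a read and the destination as a write before the underlying __mem*()
 * implementation runs. On a failed (and reported) check the operation is
 * skipped and NULL is returned.
 */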
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}
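/*
 * A worked example of the encoding above, assuming the usual scale of 8
 * (KASAN_SHADOW_SCALE_SIZE), where each shadow byte covers 8 bytes of
 * memory: unpoisoning 13 bytes writes 0 to the first shadow byte (all 8
 * bytes addressable) and, since 13 & KASAN_SHADOW_MASK == 5, writes 5 to
 * the second one, meaning only the first 5 bytes of that granule are
 * valid. Under CONFIG_KASAN_SW_TAGS the trailing granule stores the
 * pointer tag instead.
 */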

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
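/*
 * For example, a kmalloc-128 cache (object_size == 128) fails the
 * "<= 128 - 32" test but passes "<= 512 - 64" and so gets a 64-byte
 * redzone, while a 32-byte object gets the 16-byte minimum. Tag-based
 * KASAN returns 0 because it relies on pointer tags rather than redzones
 * to catch out-of-bounds accesses.
 */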

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
		      max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

static void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next
 *    to each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}
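/*
 * In generic mode a shadow byte in [0, KASAN_SHADOW_SCALE_SIZE) means the
 * granule is at least partially addressable, while negative values are
 * poison markers such as KASAN_KMALLOC_FREE, so anything outside that
 * range makes the free invalid. In tag mode the check is a tag match: the
 * pointer's tag must equal the tag stored in shadow, unless the pointer
 * carries the match-all KASAN_TAG_KERNEL tag.
 */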

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
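/*
 * Example of the redzone bounds above: a 40-byte kmalloc() served from
 * the kmalloc-64 cache (object_size == 64, scale size 8) unpoisons bytes
 * 0..39, rounds redzone_start up to object + 40 (already aligned) and
 * redzone_end to object + 64, so bytes 40..63 are poisoned as
 * KASAN_KMALLOC_REDZONE and catch accesses past the requested size.
 */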

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
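/*
 * Assuming the generic mode's shadow scale shift of 3, the module shadow
 * allocated below needs 1 byte per 8 bytes of module space: e.g. a 2 MB
 * module maps to 256 KB of shadow, rounded up to whole pages and placed
 * at the fixed location kasan_mem_to_shadow() computes for 'addr'.
 */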
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
extern bool report_enabled(void);

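/*
 * kasan_report() can be reached from code that runs with user access
 * enabled (e.g. inside SMAP/PAN-disabled sections), so the user access
 * state is saved and restored around the report to keep the hardware
 * state, and objtool's checking of it, consistent.
 */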
bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	unsigned long flags = user_access_save();
	bool ret = false;

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		ret = true;
	}

	user_access_restore(flags);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's
	 * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

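/*
 * Example of the shadow sizing below, assuming 4 KB pages and a scale
 * shift of 3: a 128 MB memory block being onlined covers 32768 pages and
 * so needs 32768 >> 3 == 4096 shadow pages, i.e. 16 MB of shadow mapped
 * at kasan_mem_to_shadow() of the block's start address.
 */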
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * Non-NULL result of the find_vm_area() will tell us if
		 * that was the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
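/*
 * apply_to_page_range() callback: lazily back one page of vmalloc shadow.
 * The pte_none() check is done twice, the second time under
 * init_mm.page_table_lock, so a concurrent caller populating the same
 * shadow page loses the race cleanly and the spare page is freed.
 */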
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison_shadow:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison_shadow(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *         |??AAAAAA|AAAAAAAA|AA??????| < shadow
 *          (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 * free_region_start                                    free_region_end
 * |                  start                     end     |
 * v                  v                         v       v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *         |FFAAAAAA|AAAAAAAA|AAF?????| < shadow
 *          (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run in to any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	free_region_start = ALIGN(free_region_start,
				  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	free_region_end = ALIGN_DOWN(free_region_end,
				     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}
#endif