// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
#define SHADOW_BLOCKS_PER_ROW 16
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2
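/*
 * With KASAN_SHADOW_SCALE_SIZE (typically 8) bytes of memory covered by each
 * shadow byte, one 16-byte shadow row printed by print_shadow_for_address()
 * below corresponds to 128 bytes of memory, and SHADOW_ROWS_AROUND_ADDR rows
 * are shown on each side of the buggy address.
 */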

static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1

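/*
 * By default only the first KASAN report is printed and later ones are
 * suppressed via KASAN_BIT_REPORTED (see report_enabled() below). Passing
 * "kasan_multi_shot" on the kernel command line, or using the save/restore
 * helpers below (used, for example, by the KASAN tests), allows every report
 * to be printed.
 */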
bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);

void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);

static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);

static void print_error_description(struct kasan_access_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n",
		get_bug_type(info), (void *)info->ip);
	pr_err("%s of size %zu at addr %px by task %s/%d\n",
		info->is_write ? "Write" : "Read", info->access_size,
		info->access_addr, current->comm, task_pid_nr(current));
}

static DEFINE_SPINLOCK(report_lock);

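/*
 * start_report() and end_report() bracket every report: they serialize
 * reports from different contexts via report_lock and disable KASAN for the
 * current task while printing; end_report() additionally taints the kernel
 * and honors panic_on_warn.
 */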
static void start_report(unsigned long *flags)
{
	/*
	 * Make sure we don't end up in a loop: disabling KASAN for the
	 * current task prevents the reporting code itself from triggering
	 * another report.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

static void end_report(unsigned long *flags)
{
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, *flags);
	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}
	kasan_enable_current();
}

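/*
 * Print one allocation or free stack trace. The stack is stored as a stack
 * depot handle in struct kasan_track; a handle of zero means no stack was
 * recorded.
 */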
static void print_track(struct kasan_track *track, const char *prefix)
{
	pr_err("%s by task %u:\n", prefix, track->pid);
	if (track->stack) {
		unsigned long *entries;
		unsigned int nr_entries;

		nr_entries = stack_depot_fetch(track->stack, &entries);
		stack_trace_print(entries, nr_entries, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}

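/*
 * Only addresses in the linear mapping ([PAGE_OFFSET, high_memory)) can be
 * converted back to a struct page; for other addresses (vmalloc, modules,
 * etc.) this returns NULL and the report skips the slab/page details.
 */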
struct page *kasan_addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}

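/*
 * Describe where the accessed address lies relative to the nearest slab
 * object: before it ("to the left"), past its end ("to the right"), or
 * within it ("inside"), together with the object's [start, end) range.
 */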
static void describe_object_addr(struct kmem_cache *cache, void *object,
				const void *addr)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)object;
	const char *rel_type;
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %px\n"
	       " which belongs to the cache %s of size %d\n",
		object, cache->name, cache->object_size);

	if (!addr)
		return;

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + cache->object_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + cache->object_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %d-byte region [%px, %px)\n",
		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
		(void *)(object_addr + cache->object_size));
}

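/*
 * Pick the free stack to print. With CONFIG_KASAN_SW_TAGS_IDENTIFY several
 * free stacks are kept per object, and the one whose recorded pointer tag
 * matches the tag of the bad access is preferred, falling back to
 * free_track_idx; without that option, slot 0 is used.
 */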
static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	int i = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
		if (alloc_meta->free_pointer_tag[i] == tag)
			break;
	}
	if (i == KASAN_NR_FREE_STACKS)
		i = alloc_meta->free_track_idx;
#endif

	return &alloc_meta->free_track[i];
}

static void describe_object(struct kmem_cache *cache, void *object,
				const void *addr, u8 tag)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	if (cache->flags & SLAB_KASAN) {
		struct kasan_track *free_track;

		print_track(&alloc_info->alloc_track, "Allocated");
		pr_err("\n");
		free_track = kasan_get_free_track(cache, object, tag);
		print_track(free_track, "Freed");
		pr_err("\n");
	}

	describe_object_addr(cache, object, addr);
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (addr >= (void *)_stext && addr < (void *)_end)
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

static bool __must_check tokenize_frame_descr(const char **frame_descr,
					      char *token, size_t max_tok_len,
					      unsigned long *value)
{
	const char *sep = strchr(*frame_descr, ' ');

	if (sep == NULL)
		sep = *frame_descr + strlen(*frame_descr);

	if (token != NULL) {
		const size_t tok_len = sep - *frame_descr;

		if (tok_len + 1 > max_tok_len) {
			pr_err("KASAN internal error: frame description too long: %s\n",
			       *frame_descr);
			return false;
		}

		/* Copy token (+ 1 byte for '\0'). */
		strlcpy(token, *frame_descr, tok_len + 1);
	}

	/* Advance frame_descr past separator. */
	*frame_descr = sep + 1;

	if (value != NULL && kstrtoul(token, 10, value)) {
		pr_err("KASAN internal error: not a valid number: %s\n", token);
		return false;
	}

	return true;
}

static void print_decoded_frame_descr(const char *frame_descr)
{
	/*
	 * We need to parse the following string:
	 *    "n alloc_1 alloc_2 ... alloc_n"
	 * where alloc_i looks like
	 *    "offset size len name"
	 * or "offset size len name:line".
	 */
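	/*
	 * For example (illustrative values only), the descriptor
	 * "1 32 8 3 buf" describes one 8-byte object named "buf" at
	 * offset 32 and is printed as:
	 *    this frame has 1 object:
	 *     [32, 40) 'buf'
	 */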

	char token[64];
	unsigned long num_objects;

	if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
				  &num_objects))
		return;

	pr_err("\n");
	pr_err("this frame has %lu %s:\n", num_objects,
	       num_objects == 1 ? "object" : "objects");

	while (num_objects--) {
		unsigned long offset;
		unsigned long size;

		/* access offset */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  &offset))
			return;
		/* access size */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  &size))
			return;
		/* name length (unused) */
		if (!tokenize_frame_descr(&frame_descr, NULL, 0, NULL))
			return;
		/* object name */
		if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
					  NULL))
			return;

		/* Strip line number; without filename it's not very helpful. */
		strreplace(token, ':', '\0');

		/* Finally, print object information. */
		pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);
	}
}

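/*
 * Locate the stack frame that contains @addr: walk the shadow of the current
 * task's stack backwards until the frame's left redzone (KASAN_STACK_LEFT)
 * is found. The first three words of that frame hold the frame marker, a
 * pointer to the compiler-generated frame description string, and the
 * frame's program counter (frame[0..2] below).
 */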
static bool __must_check get_address_stack_frame_info(const void *addr,
						      unsigned long *offset,
						      const char **frame_descr,
						      const void **frame_pc)
{
	unsigned long aligned_addr;
	unsigned long mem_ptr;
	const u8 *shadow_bottom;
	const u8 *shadow_ptr;
	const unsigned long *frame;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_STACK_GROWSUP));

	/*
	 * NOTE: We currently only support printing frame information for
	 * accesses to the task's own stack.
	 */
	if (!object_is_on_stack(addr))
		return false;

	aligned_addr = round_down((unsigned long)addr, sizeof(long));
	mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
	shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
	shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));

	while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
		shadow_ptr--;
		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
	}

	while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
		shadow_ptr--;
		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
	}

	if (shadow_ptr < shadow_bottom)
		return false;

	frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
	if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
		pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
		       frame[0]);
		return false;
	}

	*offset = (unsigned long)addr - (unsigned long)frame;
	*frame_descr = (const char *)frame[1];
	*frame_pc = (void *)frame[2];

	return true;
}

static void print_address_stack_frame(const void *addr)
{
	unsigned long offset;
	const char *frame_descr;
	const void *frame_pc;

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return;

	if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
					  &frame_pc))
		return;

	/*
	 * get_address_stack_frame_info only returns true if the given addr is
	 * on the current task's stack.
	 */
	pr_err("\n");
	pr_err("addr %px is located in stack of task %s/%d at offset %lu in frame:\n",
	       addr, current->comm, task_pid_nr(current), offset);
	pr_err(" %pS\n", frame_pc);

	if (!frame_descr)
		return;

	print_decoded_frame_descr(frame_descr);
}

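/*
 * Print everything that is known about the address: the current stack, the
 * slab object it belongs to (if any), whether it is a global variable, the
 * owning page, and, for stack addresses, the decoded stack frame.
 */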
static void print_address_description(void *addr, u8 tag)
{
	struct page *page = kasan_addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr, tag);
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}

	print_address_stack_frame(addr);
}

static bool row_is_guilty(const void *row, const void *guilty)
{
	return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
}

static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/* The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG/8)*2 chars.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}

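/*
 * Dump the shadow memory around the buggy address: SHADOW_ROWS_AROUND_ADDR
 * rows of SHADOW_BYTES_PER_ROW shadow bytes on each side, with a '^' marker
 * under the shadow byte that covers the address itself.
 */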
static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			 (i == 0) ? ">%px: " : " %px: ", kaddr);
		/*
		 * We should not pass a shadow pointer to a generic
		 * function, because generic functions may try to
		 * access the KASAN mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}

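/*
 * A report is suppressed while KASAN is disabled for the current task
 * (kasan_depth != 0). Otherwise only the first report is printed, unless
 * multi-shot mode is enabled.
 */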
bool report_enabled(void)
{
	if (current->kasan_depth)
		return false;
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

void kasan_report_invalid_free(void *object, unsigned long ip)
{
	unsigned long flags;
	u8 tag = get_tag(object);

	object = reset_tag(object);
	start_report(&flags);
	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
	print_tags(tag, object);
	pr_err("\n");
	print_address_description(object, tag);
	pr_err("\n");
	print_shadow_for_address(object);
	end_report(&flags);
}

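/*
 * Common entry point for all bad-access reports. If the address has no
 * shadow mapping, only the error description and the current stack are
 * printed; otherwise the full description, the tags (for tag-based KASAN)
 * and the surrounding shadow memory are dumped as well.
 */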
void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	struct kasan_access_info info;
	void *tagged_addr;
	void *untagged_addr;
	unsigned long flags;

	disable_trace_on_warning();

	tagged_addr = (void *)addr;
	untagged_addr = reset_tag(tagged_addr);

	info.access_addr = tagged_addr;
	if (addr_has_shadow(untagged_addr))
		info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
	else
		info.first_bad_addr = untagged_addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	start_report(&flags);

	print_error_description(&info);
	if (addr_has_shadow(untagged_addr))
		print_tags(get_tag(tagged_addr), info.first_bad_addr);
	pr_err("\n");

	if (addr_has_shadow(untagged_addr)) {
		print_address_description(untagged_addr, get_tag(tagged_addr));
		pr_err("\n");
		print_shadow_for_address(info.first_bad_addr);
	} else {
		dump_stack();
	}

	end_report(&flags);
}

#ifdef CONFIG_KASAN_INLINE
/*
 * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
 * canonical half of the address space) cause out-of-bounds shadow memory reads
 * before the actual access. For addresses in the low canonical half of the
 * address space, as well as most non-canonical addresses, that out-of-bounds
 * shadow memory access lands in the non-canonical part of the address space.
 * Help the user figure out what the original bogus pointer was.
 */
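/*
 * This inverts the generic shadow mapping, shadow = (addr >>
 * KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, to recover the access
 * address that would have produced a shadow access at @addr.
 */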
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to shadow for low canonical addresses, we
	 * can still be pretty sure - that shadow region is a fairly narrow
	 * chunk of the non-canonical address space.
	 * But faults that look like shadow for non-canonical addresses are a
	 * really large chunk of the address space. In that case, we still
	 * print the decoded address, but make it clear that this is not
	 * necessarily what's actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_SHADOW_MASK);
}
#endif