// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

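/*
 * Check the single shadow byte covering a 1-byte access: a shadow value of
 * N (1..7) means only the first N bytes of the granule are accessible, so
 * the access is bad if its offset within the granule is >= N; negative
 * shadow values mark the whole granule as poisoned.
 */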
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

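/*
 * Return the address of the first nonzero byte in [start, start + size),
 * or 0 if the range contains only zeroes.
 */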
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

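/*
 * Scan the shadow range [start, end): the unaligned prefix and the trailing
 * bytes are checked one byte at a time, the aligned middle part one 8-byte
 * word at a time.
 */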
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

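/*
 * Check an access of arbitrary size: scan the covering shadow range and, if
 * a nonzero shadow byte is found, report poison unless it is the last shadow
 * byte and it still covers the final byte of the access.
 */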
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

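/*
 * Dispatch to the size-specialized checks when the access size is a
 * compile-time constant, falling back to the generic check otherwise.
 */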
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

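/*
 * Check an access to [addr, addr + size): zero-size accesses are allowed,
 * wrapping or out-of-range addresses and poisoned memory are reported.
 * Returns true if the access is valid, false if a report was produced.
 */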
static __always_inline bool check_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

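/*
 * Return true if the shadow byte covering addr marks its granule as fully
 * or at least partially accessible (i.e. it is not a poison marker).
 */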
bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

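/*
 * On cache shrink or shutdown, evict this cache's objects from the
 * quarantine so that deferred frees do not keep its slabs alive.
 */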
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

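/*
 * Unpoison the global's usable bytes and poison the trailing redzone that
 * the compiler placed after it.
 */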
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

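/*
 * Define the __asan_load/__asan_store hooks (and their _noabort aliases)
 * that the compiler emits before every 1/2/4/8/16-byte memory access.
 */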
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

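/*
 * Record an auxiliary call stack (e.g. from call_rcu() or a workqueue) in
 * the object's alloc metadata so it can be shown in later reports.
 */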
void kasan_record_aux_stack(void *addr)
{
	struct page *page = kasan_addr_to_page(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !(page && PageSlab(page)))
		return;

	cache = page->slab_cache;
	object = nearest_obj(cache, page, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}

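/*
 * Save the free stack in the object's free metadata and mark the first
 * shadow byte with KASAN_KMALLOC_FREETRACK so that kasan_get_free_track()
 * knows the track is valid.
 */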
void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
	/* The object was freed and has free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
		return NULL;
	/* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
	return &kasan_get_free_meta(cache, object)->free_track;
}
Walter Wue4b78182020-08-06 23:24:39 -0700369}