/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

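/*
 * Unpoisons the first 'size' bytes at 'address'. If 'size' is not a
 * multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte encodes how
 * many leading bytes of the final granule are accessible.
 */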
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

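/*
 * Each shadow byte tracks KASAN_SHADOW_SCALE_SIZE (8) bytes of memory:
 * 0 means the whole granule is accessible, 1..7 mean only the first N
 * bytes are, and negative values mark redzones and freed memory.
 */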
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

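/*
 * Returns the address of the first non-zero byte in [start, start + size),
 * or 0 if all bytes are zero. memory_is_zero() below does the same for
 * [start, end), scanning word-at-a-time once the start is 8-byte aligned.
 */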
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

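/*
 * Generic check used when the access size is not a small compile-time
 * constant: the access is valid iff its whole shadow region is zero,
 * except that the last shadow byte may encode a partial granule.
 */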
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

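/*
 * Dispatch to a size-specialized check when the compiler knows the access
 * size; fall back to the generic check otherwise.
 */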
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

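/*
 * Core check: report a bad access if it starts below the shadowed address
 * range or touches any poisoned byte inside it.
 */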
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

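/*
 * KASAN replaces the architecture's memset/memmove/memcpy so that string
 * operations are checked too: sources as reads, destinations as writes.
 * The uninstrumented __mem*() variants do the actual work.
 */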
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

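/*
 * Page allocator hooks: freshly allocated pages are unpoisoned and freed
 * pages are poisoned with KASAN_FREE_PAGE, so that use-after-free of whole
 * pages is caught as well.
 */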
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

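/*
 * Grow the cache's object size to fit KASAN metadata: allocation
 * bookkeeping always, free bookkeeping when it cannot live inside the
 * freed object itself (RCU caches, caches with constructors, objects
 * smaller than the metadata), plus an adaptively sized redzone.
 */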
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	/*
	 * Make sure the adjusted size is still less than
	 * KMALLOC_MAX_CACHE_SIZE.
	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
	 * to skip it for SLUB when it starts using kasan_cache_create().
	 */
	if (*size > KMALLOC_MAX_CACHE_SIZE -
	    sizeof(struct kasan_alloc_meta) -
	    sizeof(struct kasan_free_meta))
		return;
	*flags |= SLAB_KASAN;
	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;
	*size = min(KMALLOC_MAX_CACHE_SIZE,
		    max(*size,
			cache->object_size +
			optimal_redzone(cache->object_size)));
}
#endif

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

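/*
 * Poison a whole slab page when it is created; individual objects are
 * unpoisoned on allocation and re-poisoned on free.
 */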
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		alloc_info->state = KASAN_STATE_INIT;
	}
#endif
}

#ifdef CONFIG_SLAB
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

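/*
 * Capture the current stack trace, truncate it at the IRQ entry point,
 * and store it deduplicated in the stack depot, yielding a compact handle.
 */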
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

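/*
 * With CONFIG_SLAB, a freed object is not handed back to the allocator
 * immediately: it is poisoned, its free stack is recorded and it is put
 * into the quarantine, so that a use-after-free hits the poisoned shadow.
 * Freeing an object that is already quarantined or freed is reported as a
 * double free.
 */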
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
			pr_err("Double free");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
#else
	kasan_poison_slab_free(cache, object);
	return false;
#endif
}

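/*
 * Unpoison exactly the requested size and poison the remainder of the
 * object, rounded up to shadow granularity, as a redzone; the allocation
 * size and stack are recorded in the object's metadata.
 */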
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

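/*
 * Module vmalloc space is not covered by the shadow mapped at boot, so
 * shadow pages for a freshly allocated module region are allocated here
 * on demand and released via kasan_free_shadow() below.
 */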
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

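/*
 * Globals instrumentation: the compiler emits constructors that hand each
 * global's descriptor to __asan_register_globals(); the global itself is
 * unpoisoned and its trailing redzone is poisoned.
 */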
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

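/*
 * Entry points for compiler-generated instrumentation: every load and
 * store becomes a call to __asan_loadN()/__asan_storeN() (or a _noabort
 * alias, depending on compiler version), with specialized variants for
 * 1-, 2-, 4-, 8- and 16-byte accesses.
 */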
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif