// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan test: %s " fmt, __func__

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include "../mm/kasan/kasan.h"

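/*
 * Generic KASAN detects any access past the end of an object, so no extra
 * offset is needed there. The tag-based mode works on granules of
 * KASAN_SHADOW_SCALE_SIZE bytes, so an access that stays within the object's
 * last granule keeps the object's tag and goes unnoticed; the tests below
 * therefore shift their out-of-bounds accesses by this offset so that the
 * access lands in the next granule.
 */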
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)

/*
 * We assign some test results to these globals to make sure the tests
 * are not eliminated as dead code.
 */

int kasan_int_result;
void *kasan_ptr_result;

/*
 * Note: test functions are marked noinline so that their names appear in
 * reports.
 */

static noinline void __init kmalloc_oob_right(void)
{
	char *ptr;
	size_t size = 123;

	pr_info("out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size + OOB_TAG_OFF] = 'x';

	kfree(ptr);
}

static noinline void __init kmalloc_oob_left(void)
{
	char *ptr;
	size_t size = 15;

	pr_info("out-of-bounds to left\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	*ptr = *(ptr - 1);
	kfree(ptr);
}

static noinline void __init kmalloc_node_oob_right(void)
{
	char *ptr;
	size_t size = 4096;

	pr_info("kmalloc_node(): out-of-bounds to right\n");
	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 0;
	kfree(ptr);
}

#ifdef CONFIG_SLUB
static noinline void __init kmalloc_pagealloc_oob_right(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	/* Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size + OOB_TAG_OFF] = 0;

	kfree(ptr);
}

static noinline void __init kmalloc_pagealloc_uaf(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: use-after-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	ptr[0] = 0;
}

static noinline void __init kmalloc_pagealloc_invalid_free(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: invalid-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr + 1);
}
#endif

static noinline void __init kmalloc_large_oob_right(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
	/* Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	pr_info("kmalloc large allocation: out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 0;
	kfree(ptr);
}

static noinline void __init kmalloc_oob_krealloc_more(void)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	pr_info("out-of-bounds after krealloc more\n");
	ptr1 = kmalloc(size1, GFP_KERNEL);
	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		kfree(ptr2);
		return;
	}

	ptr2[size2 + OOB_TAG_OFF] = 'x';

	kfree(ptr2);
}

static noinline void __init kmalloc_oob_krealloc_less(void)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	pr_info("out-of-bounds after krealloc less\n");
	ptr1 = kmalloc(size1, GFP_KERNEL);
	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		return;
	}

	ptr2[size2 + OOB_TAG_OFF] = 'x';

	kfree(ptr2);
}

static noinline void __init kmalloc_oob_16(void)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	pr_info("kmalloc out-of-bounds for 16-bytes access\n");
	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		kfree(ptr2);
		return;
	}
	*ptr1 = *ptr2;
	kfree(ptr1);
	kfree(ptr2);
}

static noinline void __init kmalloc_oob_memset_2(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset2\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr + 7 + OOB_TAG_OFF, 0, 2);

	kfree(ptr);
}

static noinline void __init kmalloc_oob_memset_4(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset4\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr + 5 + OOB_TAG_OFF, 0, 4);

	kfree(ptr);
}


static noinline void __init kmalloc_oob_memset_8(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset8\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr + 1 + OOB_TAG_OFF, 0, 8);

	kfree(ptr);
}

static noinline void __init kmalloc_oob_memset_16(void)
{
	char *ptr;
	size_t size = 16;

	pr_info("out-of-bounds in memset16\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr + 1 + OOB_TAG_OFF, 0, 16);

	kfree(ptr);
}

static noinline void __init kmalloc_oob_in_memset(void)
{
	char *ptr;
	size_t size = 666;

	pr_info("out-of-bounds in memset\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr, 0, size + 5 + OOB_TAG_OFF);

	kfree(ptr);
}

static noinline void __init kmalloc_memmove_invalid_size(void)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	pr_info("invalid size in memmove\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset((char *)ptr, 0, 64);
	memmove((char *)ptr, (char *)ptr + 4, invalid_size);
	kfree(ptr);
}

static noinline void __init kmalloc_uaf(void)
{
	char *ptr;
	size_t size = 10;

	pr_info("use-after-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	*(ptr + 8) = 'x';
}

static noinline void __init kmalloc_uaf_memset(void)
{
	char *ptr;
	size_t size = 33;

	pr_info("use-after-free in memset\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	memset(ptr, 0, size);
}

static noinline void __init kmalloc_uaf2(void)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	pr_info("use-after-free after another kmalloc\n");
	ptr1 = kmalloc(size, GFP_KERNEL);
	if (!ptr1) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr1);
	ptr2 = kmalloc(size, GFP_KERNEL);
	if (!ptr2) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr1[40] = 'x';
	if (ptr1 == ptr2)
		pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
	kfree(ptr2);
}

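/*
 * The two tests below pass kfree() an address recovered from the original
 * pointer via a struct page or physical-address round trip. These are
 * checks against false positives: the frees are valid and should produce
 * no KASAN report.
 */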
static noinline void __init kfree_via_page(void)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	pr_info("invalid-free false positive (via page)\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static noinline void __init kfree_via_phys(void)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	pr_info("invalid-free false positive (via phys)\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static noinline void __init kmem_cache_oob(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						size, 0,
						0, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("out-of-bounds in kmem_cache_alloc\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	*p = p[size + OOB_TAG_OFF];

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static noinline void __init memcg_accounted_kmem_cache(void)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}

	pr_info("allocate memcg accounted object\n");
	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static char global_array[10];

static noinline void __init kasan_global_oob(void)
{
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	pr_info("out-of-bounds global variable\n");
	*(volatile char *)p;
}

static noinline void __init kasan_stack_oob(void)
{
	char stack_array[10];
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	pr_info("out-of-bounds on stack\n");
	*(volatile char *)p;
}

static noinline void __init ksize_unpoisons_memory(void)
{
	char *ptr;
	size_t size = 123, real_size;

	pr_info("ksize() unpoisons the whole allocated chunk\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}
	real_size = ksize(ptr);
	/* This access doesn't trigger an error. */
	ptr[size] = 'x';
	/* This one does. */
	ptr[real_size] = 'y';
	kfree(ptr);
}

static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}

static noinline void __init kasan_alloca_oob_left(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	pr_info("out-of-bounds to left on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kasan_alloca_oob_right(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	pr_info("out-of-bounds to right on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kmem_cache_double_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("double-free on heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static noinline void __init kmem_cache_invalid_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("invalid-free of heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed */
	kmem_cache_free(cache, p + 1);

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static noinline void __init kasan_memchr(void)
{
	char *ptr;
	size_t size = 24;

	pr_info("out-of-bounds in memchr\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	kasan_ptr_result = memchr(ptr, '1', size + 1);
	kfree(ptr);
}

static noinline void __init kasan_memcmp(void)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	pr_info("out-of-bounds in memcmp\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	memset(arr, 0, sizeof(arr));
	kasan_int_result = memcmp(ptr, arr, size + 1);
	kfree(ptr);
}

static noinline void __init kasan_strings(void)
{
	char *ptr;
	size_t size = 24;

	pr_info("use-after-free in strchr\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	kasan_ptr_result = strchr(ptr, '1');

	pr_info("use-after-free in strrchr\n");
	kasan_ptr_result = strrchr(ptr, '1');

	pr_info("use-after-free in strcmp\n");
	kasan_int_result = strcmp(ptr, "2");

	pr_info("use-after-free in strncmp\n");
	kasan_int_result = strncmp(ptr, "2", 1);

	pr_info("use-after-free in strlen\n");
	kasan_int_result = strlen(ptr);

	pr_info("use-after-free in strnlen\n");
	kasan_int_result = strnlen(ptr, 1);
}

static noinline void __init kasan_bitops(void)
{
	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
	 * this way we do not actually corrupt other memory.
	 */
	long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	if (!bits)
		return;

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	pr_info("out-of-bounds in set_bit\n");
	set_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __set_bit\n");
	__set_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in clear_bit\n");
	clear_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __clear_bit\n");
	__clear_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in clear_bit_unlock\n");
	clear_bit_unlock(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __clear_bit_unlock\n");
	__clear_bit_unlock(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in change_bit\n");
	change_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __change_bit\n");
	__change_bit(BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	pr_info("out-of-bounds in test_and_set_bit\n");
	test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_set_bit\n");
	__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_set_bit_lock\n");
	test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_clear_bit\n");
	test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_clear_bit\n");
	__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_change_bit\n");
	test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_change_bit\n");
	__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_bit\n");
	kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

#if defined(clear_bit_unlock_is_negative_byte)
	pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
	kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG +
		BITS_PER_BYTE, bits);
#endif
	kfree(bits);
}

static noinline void __init kmalloc_double_kzfree(void)
{
	char *ptr;
	size_t size = 16;

	pr_info("double-free (kfree_sensitive)\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree_sensitive(ptr);
	kfree_sensitive(ptr);
}

#ifdef CONFIG_KASAN_VMALLOC
static noinline void __init vmalloc_oob(void)
{
	void *area;

	pr_info("vmalloc out-of-bounds\n");

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	if (!area) {
		pr_err("Allocation failed\n");
		return;
	}

	((volatile char *)area)[3100];
	vfree(area);
}
#else
static void __init vmalloc_oob(void) {}
#endif

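/*
 * The RCU test below frees the object from inside its own reclaim callback
 * and then writes to it, so the use-after-free is triggered from the RCU
 * callback rather than directly from kasan_rcu_uaf().
 */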
static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp = container_of(rp,
					struct kasan_rcu_info, rcu);

	kfree(fp);
	fp->i = 1;
}

static noinline void __init kasan_rcu_uaf(void)
{
	struct kasan_rcu_info *ptr;

	pr_info("use-after-free in kasan_rcu_reclaim\n");
	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
	call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
}

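/*
 * All tests run from module init; each deliberately bad access is reported
 * through the regular KASAN output in dmesg. This file is normally built as
 * a module (CONFIG_TEST_KASAN) and exercised with "insmod test_kasan.ko".
 */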
static int __init kmalloc_tests_init(void)
{
	/*
	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
	 * report for the first case.
	 */
	bool multishot = kasan_save_enable_multi_shot();

	kmalloc_oob_right();
	kmalloc_oob_left();
	kmalloc_node_oob_right();
#ifdef CONFIG_SLUB
	kmalloc_pagealloc_oob_right();
	kmalloc_pagealloc_uaf();
	kmalloc_pagealloc_invalid_free();
#endif
	kmalloc_large_oob_right();
	kmalloc_oob_krealloc_more();
	kmalloc_oob_krealloc_less();
	kmalloc_oob_16();
	kmalloc_oob_in_memset();
	kmalloc_oob_memset_2();
	kmalloc_oob_memset_4();
	kmalloc_oob_memset_8();
	kmalloc_oob_memset_16();
	kmalloc_memmove_invalid_size();
	kmalloc_uaf();
	kmalloc_uaf_memset();
	kmalloc_uaf2();
	kfree_via_page();
	kfree_via_phys();
	kmem_cache_oob();
	memcg_accounted_kmem_cache();
	kasan_stack_oob();
	kasan_global_oob();
	kasan_alloca_oob_left();
	kasan_alloca_oob_right();
	ksize_unpoisons_memory();
	copy_user_test();
	kmem_cache_double_free();
	kmem_cache_invalid_free();
	kasan_memchr();
	kasan_memcmp();
	kasan_strings();
	kasan_bitops();
	kmalloc_double_kzfree();
	vmalloc_oob();
	kasan_rcu_uaf();

	kasan_restore_multi_shot(multishot);

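	/*
	 * Returning an error from module init means the module never stays
	 * loaded, so it can simply be inserted again to re-run the tests.
	 */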
	return -EAGAIN;
}

module_init(kmalloc_tests_init);
MODULE_LICENSE("GPL");