blob: c344fe506ffc438d19d196394b899d05d608029a [file] [log] [blame]
Thomas Gleixnerd2912cb2019-06-04 10:11:33 +02001// SPDX-License-Identifier: GPL-2.0-only
Andrey Ryabinin3f158012015-02-13 14:39:53 -08002/*
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
Andrey Ryabinin3f158012015-02-13 14:39:53 -08006 */
7
Marco Elver19a33ca2019-07-11 20:53:52 -07008#include <linux/bitops.h>
Greg Thelen0386bf32017-02-24 15:00:08 -08009#include <linux/delay.h>
Marco Elver19a33ca2019-07-11 20:53:52 -070010#include <linux/kasan.h>
Andrey Ryabinin3f158012015-02-13 14:39:53 -080011#include <linux/kernel.h>
Andrey Ryabinineae08dc2016-05-20 16:59:34 -070012#include <linux/mm.h>
Marco Elver19a33ca2019-07-11 20:53:52 -070013#include <linux/mman.h>
14#include <linux/module.h>
Andrey Ryabinin3f158012015-02-13 14:39:53 -080015#include <linux/printk.h>
Andrey Konovalov573a4802021-02-24 12:05:21 -080016#include <linux/random.h>
Andrey Ryabinin3f158012015-02-13 14:39:53 -080017#include <linux/slab.h>
18#include <linux/string.h>
Andrey Ryabinineae08dc2016-05-20 16:59:34 -070019#include <linux/uaccess.h>
Mark Rutlandb92a9532019-09-23 15:34:16 -070020#include <linux/io.h>
Daniel Axtens06513912019-11-30 17:54:53 -080021#include <linux/vmalloc.h>
Mark Rutlandb92a9532019-09-23 15:34:16 -070022
23#include <asm/page.h>
Andrey Ryabinin3f158012015-02-13 14:39:53 -080024
Patricia Alfonso83c4e7a2020-10-13 16:55:02 -070025#include <kunit/test.h>
26
Walter Wuf33a0142020-08-06 23:24:54 -070027#include "../mm/kasan/kasan.h"
28
Andrey Konovalov1f600622020-12-22 12:00:24 -080029#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
Walter Wuf33a0142020-08-06 23:24:54 -070030
/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* KUnit resource wiring used by KUNIT_EXPECT_KASAN_FAIL() below. */
static struct kunit_resource resource;
/* Shared with the KASAN report path; records whether a report was seen. */
static struct kunit_kasan_expectation fail_data;
/* Saved multi-shot state, restored in kasan_test_exit(). */
static bool multishot;
41
Andrey Konovalov0fd37922021-02-24 12:05:13 -080042/*
43 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
44 * first detected bug and panic the kernel if panic_on_warn is enabled.
45 */
/* Per-test setup: enable multi-shot reporting, remembering the old state. */
static int kasan_test_init(struct kunit *test)
{
	multishot = kasan_save_enable_multi_shot();
	return 0;
}
51
/* Per-test teardown: restore the multi-shot state saved in kasan_test_init(). */
static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
}
56
/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	fail_data.report_expected = true;			\
	fail_data.report_found = false;				\
	kunit_add_named_resource(test,				\
				NULL,				\
				NULL,				\
				&resource,			\
				"kasan_data", &fail_data);	\
	expression;						\
	KUNIT_EXPECT_EQ(test,					\
			fail_data.report_expected,		\
			fail_data.report_found);		\
} while (0)
76
/* Skip the current test unless the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

/* Skip the current test if the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)
90
/* Check that a write just past the end of a kmalloc'ed buffer is reported. */
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* OOB_TAG_OFF moves the access into the next granule for tag-based modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}
102
/* Check that a read one byte before a kmalloc'ed buffer is reported. */
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
114
/* Same as kmalloc_oob_right, but for a node-local kmalloc_node() allocation. */
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
126
/* Out-of-bounds write on an allocation served by the page allocator fallback. */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	/* Only SLUB falls back to the page allocator for large kmalloc. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	/*
	 * Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}
Dmitry Vyukov47adccc2018-02-06 15:36:23 -0800144
/* Use-after-free on an allocation served by the page allocator fallback. */
static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
158
/* Invalid (mid-object) free of an allocation from the page allocator fallback. */
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* ptr + 1 is not the start of the object, so this free is invalid. */
	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
171
/* Out-of-bounds write on a large allocation that still comes from a slab. */
static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
187
/* OOB write just past the new (larger) size after a growing krealloc(). */
static void kmalloc_oob_krealloc_more(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
203
/* OOB write just past the new (smaller) size after a shrinking krealloc(). */
static void kmalloc_oob_krealloc_less(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Bytes between size2 and size1 must have been repoisoned by krealloc. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
219
/* 16-byte (u64[2]) copy out of a buffer that is 3 bytes short. */
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}
239
/* 16-byte read through a pointer whose object has already been freed. */
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* The read of *ptr2 is the use-after-free. */
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}
256
/* 2-byte memset that crosses the end of an 8-byte buffer. */
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}
268
/* 4-byte memset that crosses the end of an 8-byte buffer. */
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}
280
281
/* 8-byte memset that crosses the end of an 8-byte buffer. */
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}
293
/* 16-byte memset that crosses the end of a 16-byte buffer. */
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}
305
/* memset whose length overruns the allocation from within it. */
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}
317
Patricia Alfonso73228c72020-10-13 16:55:06 -0700318static void kmalloc_memmove_invalid_size(struct kunit *test)
Walter Wu98f3b562020-04-01 21:09:40 -0700319{
320 char *ptr;
321 size_t size = 64;
322 volatile size_t invalid_size = -2;
323
Walter Wu98f3b562020-04-01 21:09:40 -0700324 ptr = kmalloc(size, GFP_KERNEL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700325 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
Walter Wu98f3b562020-04-01 21:09:40 -0700326
327 memset((char *)ptr, 0, 64);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700328
329 KUNIT_EXPECT_KASAN_FAIL(test,
330 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
Walter Wu98f3b562020-04-01 21:09:40 -0700331 kfree(ptr);
332}
333
/* Basic use-after-free: write into a freed kmalloc'ed buffer. */
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}
345
/* Use-after-free via memset over the whole freed buffer. */
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
357
/*
 * Use-after-free through a stale pointer while a second same-size object
 * is live, so the report must blame the old object, not the new one.
 */
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * NOTE(review): the allocator may hand back the just-freed slot, making
	 * ptr2 == ptr1 and both expectations below flaky — consider retrying
	 * the allocation until the pointers differ. TODO confirm against
	 * observed failure rates.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
376
Patricia Alfonso73228c72020-10-13 16:55:06 -0700377static void kfree_via_page(struct kunit *test)
Mark Rutlandb92a9532019-09-23 15:34:16 -0700378{
379 char *ptr;
380 size_t size = 8;
381 struct page *page;
382 unsigned long offset;
383
Mark Rutlandb92a9532019-09-23 15:34:16 -0700384 ptr = kmalloc(size, GFP_KERNEL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700385 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
Mark Rutlandb92a9532019-09-23 15:34:16 -0700386
387 page = virt_to_page(ptr);
388 offset = offset_in_page(ptr);
389 kfree(page_address(page) + offset);
390}
391
Patricia Alfonso73228c72020-10-13 16:55:06 -0700392static void kfree_via_phys(struct kunit *test)
Mark Rutlandb92a9532019-09-23 15:34:16 -0700393{
394 char *ptr;
395 size_t size = 8;
396 phys_addr_t phys;
397
Mark Rutlandb92a9532019-09-23 15:34:16 -0700398 ptr = kmalloc(size, GFP_KERNEL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700399 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
Mark Rutlandb92a9532019-09-23 15:34:16 -0700400
401 phys = virt_to_phys(ptr);
402 kfree(phys_to_virt(phys));
403}
404
/* Out-of-bounds read past a kmem_cache object. */
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						size, 0,
						0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		/* Allocation failure is not a test failure; just bail out. */
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
424
Patricia Alfonso73228c72020-10-13 16:55:06 -0700425static void memcg_accounted_kmem_cache(struct kunit *test)
Greg Thelen0386bf32017-02-24 15:00:08 -0800426{
427 int i;
428 char *p;
429 size_t size = 200;
430 struct kmem_cache *cache;
431
432 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
Patricia Alfonso73228c72020-10-13 16:55:06 -0700433 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
Greg Thelen0386bf32017-02-24 15:00:08 -0800434
Greg Thelen0386bf32017-02-24 15:00:08 -0800435 /*
436 * Several allocations with a delay to allow for lazy per memcg kmem
437 * cache creation.
438 */
439 for (i = 0; i < 5; i++) {
440 p = kmem_cache_alloc(cache, GFP_KERNEL);
Markus Elfringdc2bf0002017-11-17 15:28:00 -0800441 if (!p)
Greg Thelen0386bf32017-02-24 15:00:08 -0800442 goto free_cache;
Markus Elfringdc2bf0002017-11-17 15:28:00 -0800443
Greg Thelen0386bf32017-02-24 15:00:08 -0800444 kmem_cache_free(cache, p);
445 msleep(100);
446 }
447
448free_cache:
449 kmem_cache_destroy(cache);
450}
451
/* Fixture for kasan_global_oob(): redzones are placed around globals. */
static char global_array[10];

/* Out-of-bounds read past a global array (generic mode only). */
static void kasan_global_oob(struct kunit *test)
{
	/* volatile hides the constant offset so the OOB isn't folded away. */
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
464
/*
 * ksize() must unpoison the whole usable object size, so accesses between
 * the requested and the real size are valid, and only past real_size fail.
 */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}
482
/* Out-of-bounds read past a stack array (requires stack instrumentation). */
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* volatile index keeps the compiler from proving the OOB statically. */
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
493
/* Out-of-bounds read one byte before a variable-length stack array. */
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
506
/* Out-of-bounds read one byte past a variable-length stack array. */
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
519
/* Double free of a kmem_cache object must be reported. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		/* Allocation failure is not a test failure; just bail out. */
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
540
/* Freeing a pointer into the middle of a kmem_cache object must be reported. */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	/* SLAB_TYPESAFE_BY_RCU exercises the quarantine-free-path handling. */
	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		/* Allocation failure is not a test failure; just bail out. */
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
569
/* memchr() reading one byte past the allocation must be reported. */
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* In tag-based modes, align size to the granule so size + 1 is OOB. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
592
/* memcmp() reading one byte past the allocation must be reported. */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* In tag-based modes, align size to the granule so size + 1 is OOB. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
616
/* str* helpers operating on a freed buffer must each produce a report. */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
652
/* Helper: every modifying bitop on an out-of-bounds bit must be reported. */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
664
/* Helper: every test-and-modify bitop on an out-of-bounds bit must be reported. */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

/* Not all architectures provide this negative-byte variant. */
#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
681
/*
 * Check that out-of-bounds accesses performed by bitops are detected in
 * generic (shadow-based) KASAN mode. Uses a deliberately oversized
 * allocation so the OOB accesses land in redzone rather than in a
 * neighboring object.
 */
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
Marco Elver19a33ca2019-07-11 20:53:52 -0700710
/*
 * Check that out-of-bounds accesses performed by bitops are detected in
 * tag-based KASAN modes. Here the accesses target &bits[1], i.e. the
 * granule past the allocation, where the memory tag no longer matches
 * the pointer tag.
 */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* Allocation size will be rounded up to granule size, which is 16. */
	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 16 allocated bytes. */
	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);

	kfree(bits);
}
728
/*
 * Check that a double call to kfree_sensitive() is detected: the second
 * call operates on an already-freed object and must produce a KASAN
 * (invalid-free / double-free) report.
 */
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
740
/*
 * Check that an out-of-bounds read on a vmalloc()ed buffer is detected.
 * Requires CONFIG_KASAN_VMALLOC, which shadows vmalloc regions.
 */
static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	/*
	 * Read past the 3000 requested bytes but still within the page
	 * rounding (offset 3100), so KASAN — not the guard page — fires.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}
Daniel Axtens06513912019-11-30 17:54:53 -0800757
Andrey Konovalov573a4802021-02-24 12:05:21 -0800758/*
759 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
760 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
761 * modes.
762 */
763static void match_all_not_assigned(struct kunit *test)
764{
765 char *ptr;
766 struct page *pages;
767 int i, size, order;
768
769 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
770
771 for (i = 0; i < 256; i++) {
772 size = (get_random_int() % 1024) + 1;
773 ptr = kmalloc(size, GFP_KERNEL);
774 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
775 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
776 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
777 kfree(ptr);
778 }
779
780 for (i = 0; i < 256; i++) {
781 order = (get_random_int() % 4) + 1;
782 pages = alloc_pages(GFP_KERNEL, order);
783 ptr = page_address(pages);
784 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
785 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
786 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
787 free_pages((unsigned long)ptr, order);
788 }
789}
790
/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff (KASAN_TAG_KERNEL, the match-all tag). */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free (kfree needs the matching tag). */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}
816
/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;	/* int, not u8: the loop bound must not wrap */

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
	kfree(ptr);
}
845
/*
 * Table of all KASAN KUnit test cases. Tests that only apply to a
 * particular KASAN mode skip themselves via KASAN_TEST_NEEDS_CONFIG_ON/OFF.
 */
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_oob_krealloc_more),
	KUNIT_CASE(kmalloc_oob_krealloc_less),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(memcg_accounted_kmem_cache),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};
Walter Wu387d6e42020-08-06 23:24:42 -0700890
Patricia Alfonso73228c72020-10-13 16:55:06 -0700891static struct kunit_suite kasan_kunit_test_suite = {
892 .name = "kasan",
893 .init = kasan_test_init,
894 .test_cases = kasan_kunit_test_cases,
895 .exit = kasan_test_exit,
896};
Walter Wu387d6e42020-08-06 23:24:42 -0700897
/* Register the suite with the KUnit framework. */
kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");