// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since KFENCE's
 * reports are obtained via the console, that output is what we verify: each
 * test case checks for the presence (or absence) of generated reports, and
 * relies on the 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

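/*
 * A hypothetical override, for illustration only: an architecture whose
 * reports print translated or untagged pointers could define, e.g.,
 *
 *	#define arch_kfence_test_address(addr) untagged_addr(addr)
 *
 * in its <asm/kfence.h>, so that the addresses we compute below match the
 * console output. The default above is the identity mapping.
 */
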
#define KFENCE_TEST_REQUIRES(test, cond) do {				\
	if (!(cond))							\
		kunit_skip((test), "Test requires: " #cond);		\
} while (0)

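/*
 * Usage example (see test_init_on_free() below): the statement
 *
 *	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
 *
 * marks the test as skipped, rather than failed, when the precondition does
 * not hold.
 */
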
/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * A KFENCE report, and one related to this test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

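/*
 * For illustration, the two lines of interest for an out-of-bounds read look
 * roughly as follows (addresses, offsets and object numbers are
 * hypothetical):
 *
 *	BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x88/0x1b8
 *	Out-of-bounds read at 0x0000000012345678 (1B left of kfence-#17):
 *
 * probe_console() stores the first line in observed.lines[0], and the second
 * (matched via "at 0x" or "of 0x") in observed.lines[1].
 */
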
/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check that the observed report matches the information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}


/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and, if so, on which side of
 * the object page the allocation and thus the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,   /* KFENCE, any side. */
	ALLOCATE_LEFT,  /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE,  /* No KFENCE allocation. */
};
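
/*
 * Sketch of the layout the policies above refer to (KFENCE places one object
 * per data page, with inaccessible guard pages on both sides):
 *
 *	... | guard page | object page | guard page | ...
 *
 * ALLOCATE_LEFT means the object is page-aligned, i.e. flush against the
 * left guard page; ALLOCATE_RIGHT means it ends as close to the right guard
 * page as its alignment allows.
 */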

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
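/*
 * Worked example (assuming SLUB with an alignment of 8 and 4 KiB pages): a
 * right-allocated 73-byte object starts at page_end - 80 (73 rounded up to
 * 80), so buf + size == page_end - 7 still lies on the object page, while
 * buf + size + align == page_end + 1 is inside the guard page and faults.
 */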
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the test name. Set up 2 tests per test case: one using
 * the default allocator, and another using a custom memcache (suffix
 * '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }

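/*
 * For example, KFENCE_KUNIT_CASE(test_out_of_bounds_read) expands to:
 *
 *	{ .run_case = test_out_of_bounds_read, .name = "test_out_of_bounds_read" },
 *	{ .run_case = test_out_of_bounds_read, .name = "test_out_of_bounds_read-memcache" },
 *
 * test_init() below checks for "memcache" in the test name to decide whether
 * to set up a dedicated cache for that run.
 */
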
static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");