// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, it is the
 * console output that we verify. Each test case checks for the presence (or
 * absence) of generated reports, and relies on the 'console' tracepoint to
 * capture reports as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include "kfence.h"

/* Report as observed from console. */
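/*
 * Note: lines[0] is expected to hold the report title ("BUG: KFENCE: ..."),
 * and lines[1] the access-info line that follows it.
 */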
static struct {
        spinlock_t lock;
        int nlines;
        char lines[2][256];
} observed = {
        .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
        unsigned long flags;
        int nlines;

        spin_lock_irqsave(&observed.lock, flags);
        nlines = observed.nlines;

        if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
                /*
                 * KFENCE report and related to the test.
                 *
                 * The provided @buf is not NUL-terminated; copy no more than
                 * @len bytes and let strscpy() add the missing NUL-terminator.
                 */
                strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
                nlines = 1;
        } else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
                strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
        }

        WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
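        /* True iff both the title and the access-info line were captured. */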
        return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
        enum kfence_error_type type; /* The type of error. */
        void *fn; /* Function pointer to expected function where access occurred. */
        char *addr; /* Address at which the bad access occurred. */
        bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
        return r->is_write ? "write" : "read";
}

/* Check that the observed report matches the information in @r. */
static bool report_matches(const struct expect_report *r)
{
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
        const char *end;
        char *cur;

        /* Double-checked locking. */
        if (!report_available())
                return false;

        /* Generate expected report contents. */

        /* Title */
        cur = expect[0];
        end = &expect[0][sizeof(expect[0]) - 1];
        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
                                 get_access_type(r));
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
                break;
        }

        scnprintf(cur, end - cur, " in %pS", r->fn);
        /* The exact offset won't match, remove it; also strip module name. */
        cur = strchr(expect[0], '+');
        if (cur)
                *cur = '\0';

        /* Access information */
        cur = expect[1];
        end = &expect[1][sizeof(expect[1]) - 1];

        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "Corrupted memory at");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "Invalid free of");
                break;
        }

        cur += scnprintf(cur, end - cur, " 0x" PTR_FMT, (void *)r->addr);

        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
                goto out; /* A new report is being captured. */

        /* Finally match expected output to what we actually observed. */
        ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
        spin_unlock_irqrestore(&observed.lock, flags);
        return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
                               void (*ctor)(void *))
{
        if (test->priv != TEST_PRIV_WANT_MEMCACHE)
                return size;

        kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

        /*
         * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
         * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
         * allocate via memcg, if enabled.
         */
        flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
        test_cache = kmem_cache_create("test", size, 1, flags, ctor);
        KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

        return size;
}

static void test_cache_destroy(void)
{
        if (!test_cache)
                return;

        kmem_cache_destroy(test_cache);
        test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
        return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
        if (test_cache)
                kmem_cache_free(test_cache, ptr);
        else
                kfree(ptr);
}

/*
 * If this should be a KFENCE allocation, and on which side the allocation and
 * the closest guard page should be.
 */
enum allocation_policy {
        ALLOCATE_ANY,   /* KFENCE, any side. */
        ALLOCATE_LEFT,  /* KFENCE, left side of page. */
        ALLOCATE_RIGHT, /* KFENCE, right side of page. */
        ALLOCATE_NONE,  /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
        void *alloc;
        unsigned long timeout, resched_after;
        const char *policy_name;

        switch (policy) {
        case ALLOCATE_ANY:
                policy_name = "any";
                break;
        case ALLOCATE_LEFT:
                policy_name = "left";
                break;
        case ALLOCATE_RIGHT:
                policy_name = "right";
                break;
        case ALLOCATE_NONE:
                policy_name = "none";
                break;
        }

        kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
                   policy_name, !!test_cache);

        /*
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
        timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
        /*
         * Especially for non-preemption kernels, ensure the allocation-gate
         * timer can catch up: after @resched_after, every failed allocation
         * attempt yields, to ensure the allocation-gate timer is scheduled.
         */
        resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
        do {
                if (test_cache)
                        alloc = kmem_cache_alloc(test_cache, gfp);
                else
                        alloc = kmalloc(size, gfp);

                if (is_kfence_address(alloc)) {
                        struct page *page = virt_to_head_page(alloc);
                        struct kmem_cache *s = test_cache ?:
                                        kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)];

                        /*
                         * Verify that various helpers return the right values
                         * even for KFENCE objects; these are required so that
                         * memcg accounting works correctly.
                         */
                        KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
                        KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

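                        /*
                         * Note: KFENCE places an object at the start or the
                         * end of its dedicated page, so a page-aligned address
                         * means the guard page is on the left, and an
                         * unaligned one means it is on the right.
                         */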
                        if (policy == ALLOCATE_ANY)
                                return alloc;
                        if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
                                return alloc;
                        if (policy == ALLOCATE_RIGHT &&
                            !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
                                return alloc;
                } else if (policy == ALLOCATE_NONE)
                        return alloc;

                test_free(alloc);

                if (time_after(jiffies, resched_after))
                        cond_resched();
        } while (time_before(jiffies, timeout));

        KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
        return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_out_of_bounds_read,
                .is_write = false,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);

        /*
         * If we don't have our own cache, adjust based on alignment, so that we
         * actually access guard pages on either side.
         */
        if (!test_cache)
                size = kmalloc_cache_alignment(size);

        /* Test both sides. */

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf - 1;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        expect.addr = buf + size;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_out_of_bounds_write,
                .is_write = true,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf - 1;
        WRITE_ONCE(*expect.addr, 42);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_use_after_free_read,
                .is_write = false,
        };

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        test_free(expect.addr);
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID_FREE,
                .fn = test_double_free,
        };

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        test_free(expect.addr);
        test_free(expect.addr); /* Double-free. */
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID_FREE,
                .fn = test_invalid_addr_free,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        expect.addr = buf + 1; /* Free on invalid address. */
        test_free(expect.addr); /* Invalid address free. */
        test_free(buf); /* No error. */
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
        size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_CORRUPTION,
                .fn = test_corruption,
        };
        char *buf;

        setup_test_cache(test, size, 0, NULL);

        /* Test both sides. */

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
        expect.addr = buf + size;
        WRITE_ONCE(*expect.addr, 42);
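        /*
         * Note: the stray write above lands in KFENCE's canary area; the
         * corruption is only detected and reported when the object is freed.
         */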
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        expect.addr = buf - 1;
        WRITE_ONCE(*expect.addr, 42);
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
        const size_t size = 73;
        const size_t align = kmalloc_cache_alignment(size);
        struct expect_report expect = {
                .type = KFENCE_ERROR_OOB,
                .fn = test_kmalloc_aligned_oob_read,
                .is_write = false,
        };
        char *buf;

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

        /*
         * The object is offset to the right, so there won't be an OOB to the
         * left of it.
         */
        READ_ONCE(*(buf - 1));
        KUNIT_EXPECT_FALSE(test, report_available());

        /*
         * @buf must be aligned on @align, therefore buf + size belongs to the
         * same page -> no OOB.
         */
        READ_ONCE(*(buf + size));
        KUNIT_EXPECT_FALSE(test, report_available());

        /* Overflowing by @align bytes will result in an OOB. */
        expect.addr = buf + size + align;
        READ_ONCE(*expect.addr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
        const size_t size = 73;
        struct expect_report expect = {
                .type = KFENCE_ERROR_CORRUPTION,
                .fn = test_kmalloc_aligned_oob_write,
        };
        char *buf;

        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
        /*
         * The object is offset to the right, so we won't get a page
         * fault immediately after it.
         */
        expect.addr = buf + size;
        WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
        KUNIT_EXPECT_FALSE(test, report_available());
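        /* The write corrupted a canary byte; expect the report on free. */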
        test_free(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
        const size_t size = 32;
        void *buf;

        setup_test_cache(test, size, 0, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        kmem_cache_shrink(test_cache);
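        /* The KFENCE-allocated object should be unaffected by the shrink. */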
        test_free(buf);

        KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
        /* Every object has at least 8 bytes. */
        memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
        int iter;

        for (iter = 0; iter < 5; iter++) {
                const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
                                                     (iter & 1) ? ctor_set_x : NULL);
                void *objects[] = {
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
                };
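                /* Mixing KFENCE and regular slab objects exercises both free paths in one bulk call. */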

                kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
                KUNIT_ASSERT_FALSE(test, report_available());
                test_cache_destroy();
        }
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_init_on_free,
                .is_write = false,
        };
        int i;

        if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
                return;
        /* Assume it hasn't been disabled on command line. */

        setup_test_cache(test, size, 0, NULL);
        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        for (i = 0; i < size; i++)
                expect.addr[i] = i + 1;
        test_free(expect.addr);

        for (i = 0; i < size; i++) {
                /*
                 * This may fail if the page was recycled by KFENCE and then
                 * written to again -- this, however, is near impossible with a
                 * default config.
                 */
                KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

                if (!i) /* Only check first access to not fail test if page is ever re-protected. */
                        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        }
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
        const size_t size = 32;
        char *buf;
        int i;

        setup_test_cache(test, size, 0, ctor_set_x);
        buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

        for (i = 0; i < 8; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

        test_free(buf);

        KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
        const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
        char *buf1, *buf2;
        int i;

        if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
                kunit_warn(test, "skipping ... would take too long\n");
                return;
        }

        setup_test_cache(test, size, 0, NULL);
        buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        for (i = 0; i < size; i++)
                buf1[i] = i + 1;
        test_free(buf1);

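        /*
         * Note: to verify that __GFP_ZERO is honored, the test must get back
         * the very same object that was dirtied and freed above.
         */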
        /* Try to get same address again -- this can take a while. */
        for (i = 0;; i++) {
                buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
                if (buf1 == buf2)
                        break;
                test_free(buf2);

                if (i == CONFIG_KFENCE_NUM_OBJECTS) {
                        kunit_warn(test, "giving up ... cannot get same object back\n");
                        return;
                }
        }

        for (i = 0; i < size; i++)
                KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

        test_free(buf2);

        KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
        const struct expect_report expect = {
                .type = KFENCE_ERROR_INVALID,
                .fn = test_invalid_access,
                .addr = &__kfence_pool[10],
                .is_write = false,
        };

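        /*
         * Note: the address lies within the KFENCE pool but belongs to no
         * allocated object, so the access is reported as invalid.
         */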
        READ_ONCE(__kfence_pool[10]);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
        const size_t size = 32;
        struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_memcache_typesafe_by_rcu,
                .is_write = false,
        };

        setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

        expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
        *expect.addr = 42;

        rcu_read_lock();
        test_free(expect.addr);
        KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
        /*
         * Up to this point, memory should not have been freed yet, and
         * therefore there should be no KFENCE report from the above access.
         */
        rcu_read_unlock();

        /* Above access to @expect.addr should not have generated a report! */
        KUNIT_EXPECT_FALSE(test, report_available());

        /* Only after rcu_barrier() is the memory guaranteed to be freed. */
        rcu_barrier();

        /* Expect use-after-free. */
        KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
        const size_t size = 32;
        const struct expect_report expect = {
                .type = KFENCE_ERROR_UAF,
                .fn = test_krealloc,
                .addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
                .is_write = false,
        };
        char *buf = expect.addr;
        int i;

        KUNIT_EXPECT_FALSE(test, test_cache);
        KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
        for (i = 0; i < size; i++)
                buf[i] = i + 1;

        /* Check that we successfully change the size. */
        buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
        /* Note: Might no longer be a KFENCE alloc. */
        KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
        for (i = 0; i < size; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
        for (; i < size * 3; i++) /* Fill to extra bytes. */
                buf[i] = i + 1;

        buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
        KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
        for (i = 0; i < size * 2; i++)
                KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

        buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
        KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
        KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

        READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
        KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
        const size_t size = 32;
        bool pass = false;
        unsigned long timeout;

        setup_test_cache(test, size, 0, NULL);
        KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
        /*
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
        timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
        do {
                void *objects[100];
                int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
                                                   objects);
                if (!num)
                        continue;
                for (i = 0; i < ARRAY_SIZE(objects); i++) {
                        if (is_kfence_address(objects[i])) {
                                pass = true;
                                break;
                        }
                }
                kmem_cache_free_bulk(test_cache, num, objects);
                /*
                 * kmem_cache_alloc_bulk() disables interrupts, and calling it
                 * in a tight loop may not give KFENCE a chance to switch the
                 * static branch. Call cond_resched() to let KFENCE chime in.
                 */
                cond_resched();
        } while (!pass && time_before(jiffies, timeout));

        KUNIT_EXPECT_TRUE(test, pass);
        KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the test name. Set up 2 tests per test case, one using
 * the default allocator, and another using a custom memcache (suffix
 * '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
        KFENCE_KUNIT_CASE(test_out_of_bounds_read),
        KFENCE_KUNIT_CASE(test_out_of_bounds_write),
        KFENCE_KUNIT_CASE(test_use_after_free_read),
        KFENCE_KUNIT_CASE(test_double_free),
        KFENCE_KUNIT_CASE(test_invalid_addr_free),
        KFENCE_KUNIT_CASE(test_corruption),
        KFENCE_KUNIT_CASE(test_free_bulk),
        KFENCE_KUNIT_CASE(test_init_on_free),
        KUNIT_CASE(test_kmalloc_aligned_oob_read),
        KUNIT_CASE(test_kmalloc_aligned_oob_write),
        KUNIT_CASE(test_shrink_memcache),
        KUNIT_CASE(test_memcache_ctor),
        KUNIT_CASE(test_invalid_access),
        KUNIT_CASE(test_gfpzero),
        KUNIT_CASE(test_memcache_typesafe_by_rcu),
        KUNIT_CASE(test_krealloc),
        KUNIT_CASE(test_memcache_alloc_bulk),
        {},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&observed.lock, flags);
        for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
                observed.lines[i][0] = '\0';
        observed.nlines = 0;
        spin_unlock_irqrestore(&observed.lock, flags);

        /* Any test with 'memcache' in its name will want a memcache. */
        if (strstr(test->name, "memcache"))
                test->priv = TEST_PRIV_WANT_MEMCACHE;
        else
                test->priv = NULL;

        return 0;
}

static void test_exit(struct kunit *test)
{
        test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
        .name = "kfence",
        .test_cases = kfence_test_cases,
        .init = test_init,
        .exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
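        /* Compile-time check that probe_console() matches the 'console' tracepoint's signature. */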
        check_trace_callback_type_console(probe_console);
        if (!strcmp(tp->name, "console"))
                WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
        if (!strcmp(tp->name, "console"))
                tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
        /*
         * Because we want to be able to build the test as a module, we need to
         * iterate through all known tracepoints, since the static registration
         * won't work here.
         */
        for_each_kernel_tracepoint(register_tracepoints, NULL);
        return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
        __kunit_test_suites_exit(kfence_test_suites);
        for_each_kernel_tracepoint(unregister_tracepoints, NULL);
        tracepoint_synchronize_unregister();
}

late_initcall(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");