// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
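
/*
 * Usage sketch (illustrative, not compiled; paths follow from the
 * "kfence." MODULE_PARAM_PREFIX above): the sample interval can be set
 * at boot via the command line, e.g.
 *
 *	kfence.sample_interval=100
 *
 * and, because module_param_cb() registers it with mode 0600, read or
 * lowered/disabled at runtime by root via
 *
 *	echo 0 > /sys/module/kfence/parameters/sample_interval
 *
 * Per param_set_sample_interval() above, writing a non-zero value after
 * KFENCE has been disabled post-boot fails with -EINVAL.
 */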

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
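
/*
 * Pool layout sketch (illustrative; see kfence_init_pool() for the
 * authoritative setup). With K == CONFIG_KFENCE_NUM_OBJECTS, the pool
 * consists of 2 + 2*K pages:
 *
 *	+-------+-------+-------+-------+-------+---...---+---------+-------+
 *	| guard | guard | obj 0 | guard | obj 1 |         | obj K-1 | guard |
 *	+-------+-------+-------+-------+-------+---...---+---------+-------+
 *
 * Every object page is surrounded by protected guard pages; the two
 * leading guard pages make the page count even, which simplifies the
 * address <-> metadata-index arithmetic in addr_to_metadata().
 */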

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
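
/*
 * Worked example of the address <-> index mapping above (assuming
 * PAGE_SIZE == 0x1000 for illustration): object 0's data page starts at
 * __kfence_pool + 0x2000, after the two leading guard pages, so
 * addr_to_metadata() computes 0x2000 / 0x2000 - 1 == 0, and
 * metadata_to_pageaddr() inverts this with (0 + 1) * 0x2000 == 0x2000.
 * An address in either leading guard page yields index -1, which is
 * rejected (NULL).
 */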

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
					   enum kfence_object_state next)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	/*
	 * Skip over 1 (this) function; noinline ensures we do not accidentally
	 * skip over the caller by never inlining.
	 */
	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);

	/*
	 * Pairs with READ_ONCE() in
	 * kfence_shutdown_cache(),
	 * kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}
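
/*
 * Example (illustrative): for a left-aligned 40-byte object, the left
 * canary range above is empty and the canary bytes cover the remaining
 * PAGE_SIZE - 40 bytes to the right of the object; a right-aligned
 * object instead has nearly all of its canary bytes on the left. An
 * in-page out-of-bounds write that does not reach a guard page corrupts
 * these bytes and is reported as KFENCE_ERROR_CORRUPTION when they are
 * next checked (on free).
 */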

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta)
		return NULL;

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * prandom_u32_max() will always return zero. We still benefit from
	 * enabling KFENCE as early as possible, even when the RNG is not yet
	 * available, as this will allow KFENCE to detect bugs due to earlier
	 * allocations. The only downside is that the out-of-bounds accesses
	 * detected are deterministic for such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}
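
	/*
	 * Either placement catches one side immediately: a right-aligned
	 * object ends at (or, after alignment, near) the page boundary, so
	 * an overflow faults on the adjacent guard page, while a
	 * left-aligned object starts at the page boundary, so an underflow
	 * faults on the guard page to its left. Out-of-bounds accesses on
	 * the non-guarded side that stay within the page are caught later
	 * by the canary checks.
	 */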

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, meta, KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
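
/*
 * Example /sys/kernel/debug/kfence/stats output (values illustrative,
 * format follows stats_show() above):
 *
 *	enabled: 1
 *	currently allocated: 42
 *	total allocations: 3298
 *	total frees: 3256
 *	zombie allocations: 0
 *	total bugs: 0
 */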

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	/* Enable static key, and await allocation to happen. */
	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	static_branch_enable(&kfence_allocation_key);
	/*
	 * Await an allocation. Timeout after 1 second, in case the kernel stops
	 * doing allocations, to avoid stalling this worker task for too long.
	 */
	{
		unsigned long end_wait = jiffies + HZ;

		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&kfence_allocation_gate) != 0)
				break;
			schedule_timeout(1);
		} while (time_before(jiffies, end_wait));
		__set_current_state(TASK_RUNNING);
	}
	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
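
/*
 * Sampling lifecycle sketch: every kfence_sample_interval milliseconds,
 * toggle_allocation_gate() resets kfence_allocation_gate to 0 and (with
 * CONFIG_KFENCE_STATIC_KEYS) enables kfence_allocation_key; the first
 * kfence_alloc() to observe the open gate increments it and claims the
 * single guarded allocation for this period, after which the worker
 * disables the key and re-arms itself.
 */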

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	WRITE_ONCE(kfence_enabled, true);
	schedule_delayed_work(&kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS);
	if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
		pr_cont(" at 0x%px-0x%px\n", (void *)__kfence_pool,
			(void *)(__kfence_pool + KFENCE_POOL_SIZE));
	else
		pr_cont("\n");
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
	 * cost, in case we have a large number of concurrent allocations.
	 */
	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	if (size > PAGE_SIZE)
		return NULL;

	return kfence_guarded_alloc(s, size, flags);
}
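
/*
 * Call-site sketch (illustrative; the actual hook is the kfence_alloc()
 * wrapper in include/linux/kfence.h, which short-circuits via the static
 * key or gate before calling __kfence_alloc()). A slab allocator uses it
 * roughly like:
 *
 *	void *p = kfence_alloc(s, size, flags);
 *	if (unlikely(p))
 *		return p;	// sampled: serve from the KFENCE pool
 *	// ... otherwise continue on the regular slab fast-path ...
 */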

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}
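
/*
 * Worked example for the redzone branch above (assuming PAGE_SIZE ==
 * 0x1000): a write to __kfence_pool + 0x3008 has page_index 3, which is
 * odd, so it hit the guard page between object 0 (page 2) and object 1
 * (page 4). Of the two allocated neighbours, the closer one wins -- the
 * distance from the end of object 0 is compared against the distance to
 * the start of object 1 -- and is reported as the out-of-bounds target.
 */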