/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). For more info please see
 * Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "../slab.h" /* for struct kmem_cache */

/*
 * Get the canary byte pattern for @addr. Use a pattern that varies based on the
 * lower 3 bits of the address, to increase the probability of detecting memory
 * corruptions where similar constants are used.
 */
#define KFENCE_CANARY_PATTERN(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
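
/*
 * Worked example: for addresses whose low 3 bits are 0..7, the expected
 * canary bytes are 0xaa, 0xab, 0xa8, 0xa9, 0xae, 0xaf, 0xac, 0xad. Below
 * is a minimal sketch of validating a single canary byte; the helper name
 * is hypothetical, and the actual checking loop lives in mm/kfence/core.c.
 */
static inline bool kfence_canary_byte_ok(const u8 *addr)
{
	/* Compare the byte against the pattern expected for its address. */
	return *addr == KFENCE_CANARY_PATTERN(addr);
}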

/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64

/* KFENCE object states. */
enum kfence_object_state {
	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
	KFENCE_OBJECT_ALLOCATED,	/* Object is currently allocated. */
	KFENCE_OBJECT_FREED,		/* Object was allocated, and then freed. */
};
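
/*
 * Illustrative lifecycle (summary of the states above, not upstream text):
 * metadata starts out UNUSED, becomes ALLOCATED in __kfence_alloc(), moves
 * to FREED in __kfence_free(), and may later be taken off the freelist and
 * become ALLOCATED again.
 */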

/* Alloc/free tracking information. */
struct kfence_track {
	pid_t pid;
	int cpu;
	u64 ts_nsec;
	int num_stack_entries;
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};
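
/*
 * Minimal sketch of how a kfence_track record could be filled at
 * allocation/free time. Assumptions: kfence_track_fill() is a hypothetical
 * helper (upstream does this in mm/kfence/core.c while holding the metadata
 * lock), and <linux/sched.h>, <linux/sched/clock.h> and <linux/stacktrace.h>
 * would additionally be needed.
 */
static inline void kfence_track_fill(struct kfence_track *track)
{
	/* Save the current stack trace, skipping the topmost (helper) frame. */
	track->num_stack_entries = stack_trace_save(track->stack_entries,
						    KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);	/* requesting task */
	track->cpu = raw_smp_processor_id();	/* CPU at alloc/free time */
	track->ts_nsec = local_clock();		/* monotonic timestamp */
}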

/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
	struct rcu_head rcu_head;	/* For delayed freeing. */

	/*
	 * Lock protecting the data below, since __kfence_alloc(),
	 * __kfence_free(), and kfence_handle_page_fault() may execute
	 * concurrently. Note, however, that the same metadata cannot be
	 * grabbed off the freelist twice, so two __kfence_alloc() calls
	 * cannot run concurrently on the same metadata.
	 */
	raw_spinlock_t lock;

	/* The current state of the object; see above. */
	enum kfence_object_state state;

	/*
	 * Allocated object address; cannot be calculated from size, because of
	 * alignment requirements.
	 *
	 * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
	 */
	unsigned long addr;

	/* The size of the original allocation. */
	size_t size;

	/*
	 * The kmem_cache of the last allocation; NULL if never allocated or if
	 * the cache has already been destroyed.
	 */
	struct kmem_cache *cache;

	/*
	 * In case of an invalid access, the page that was unprotected; we
	 * optimistically only store one address.
	 */
	unsigned long unprotected_page;

	/* Allocation and free stack information. */
	struct kfence_track alloc_track;
	struct kfence_track free_track;
	/* For updating alloc_covered on frees. */
	u32 alloc_stack_hash;
};
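
/*
 * Minimal sketch of consistent state access under meta->lock, per the
 * locking comment above; kfence_obj_allocated() is a hypothetical helper,
 * not part of the upstream header.
 */
static inline bool kfence_obj_allocated(struct kfence_metadata *meta)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(&meta->lock, flags);
	ret = meta->state == KFENCE_OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return ret;
}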

extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
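
/*
 * Sketch of mapping an address in the KFENCE pool to its metadata,
 * mirroring the lookup in mm/kfence/core.c. Assumptions: the pool begins
 * with two guard pages and thereafter alternates object and guard pages,
 * so object i occupies the page at byte offset (2 * i + 2) * PAGE_SIZE
 * from __kfence_pool; is_kfence_address() and __kfence_pool come from
 * <linux/kfence.h>.
 */
static inline struct kfence_metadata *kfence_addr_to_metadata(unsigned long addr)
{
	long index;

	if (!is_kfence_address((void *)addr))
		return NULL;

	/* May be out of range for addresses at the very edge of the pool. */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}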

/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
	KFENCE_ERROR_UAF,		/* Detected a use-after-free access. */
	KFENCE_ERROR_CORRUPTION,	/* Detected a memory corruption on free. */
	KFENCE_ERROR_INVALID,		/* Invalid access of unknown type. */
	KFENCE_ERROR_INVALID_FREE,	/* Invalid free. */
};

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type);
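
/*
 * Illustrative use (assumption, modelled on the page-fault path in
 * mm/kfence/core.c): once a fault has been attributed to a guarded object,
 * the handler reports it with e.g.
 *
 *	kfence_report_error(addr, is_write, regs, meta, KFENCE_ERROR_OOB);
 *
 * where @meta may be NULL for KFENCE_ERROR_INVALID, if no object could be
 * associated with the faulting address.
 */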

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);

#endif /* MM_KFENCE_KFENCE_H */