// SPDX-License-Identifier: GPL-2.0
// error-inject.c: Function-level error injection table
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);
struct ei_entry {
	struct list_head list;
	unsigned long start_addr;	/* first address of the whitelisted function */
	unsigned long end_addr;		/* end of the function (exclusive) */
	int etype;			/* EI_ETYPE_* injectable return type */
	void *priv;			/* owning module, or NULL for vmlinux */
};

/* Check whether @addr lies within a whitelisted function. */
bool within_error_injection_list(unsigned long addr)
{
	struct ei_entry *ent;
	bool ret = false;

	mutex_lock(&ei_mutex);
	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&ei_mutex);
	return ret;
}

/* Return the EI_ETYPE_* error type allowed for the function containing @addr. */
int get_injectable_error_type(unsigned long addr)
{
	struct ei_entry *ent;

	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return ent->etype;
	}
	return EI_ETYPE_NONE;
}
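
/*
 * Illustrative sketch (an assumption about callers, not code in this file):
 * a fault injector such as the fail_function framework is expected to use
 * the type returned above to decide which forced return values are legal
 * for a whitelisted function, roughly:
 *
 *	switch (get_injectable_error_type(addr)) {
 *	case EI_ETYPE_NULL:		only NULL may be forced
 *	case EI_ETYPE_ERRNO:		only -errno values may be forced
 *	case EI_ETYPE_ERRNO_NULL:	NULL or -errno values may be forced
 *	case EI_ETYPE_TRUE:		only "true" (1) may be forced
 *	}
 */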

/*
 * Lookup and populate the error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overridden with
 * error injection, so we need to populate the list of the symbols that have
 * been marked as safe for overriding.
 */
static void populate_error_injection_list(struct error_injection_entry *start,
					  struct error_injection_entry *end,
					  void *priv)
{
	struct error_injection_entry *iter;
	struct ei_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	mutex_lock(&ei_mutex);
	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)iter->addr);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find error inject entry at %p\n",
				(void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			break;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		ent->etype = iter->etype;
		ent->priv = priv;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &error_injection_list);
	}
	mutex_unlock(&ei_mutex);
}

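/*
 * Illustrative note (a sketch, not code in this file): functions are added
 * to the whitelist section below with the ALLOW_ERROR_INJECTION() macro
 * from <linux/error-injection.h>, placed next to the function's definition,
 * e.g.:
 *
 *	ALLOW_ERROR_INJECTION(some_function, ERRNO);
 *
 * (some_function is a placeholder name.) The macro records a
 * struct error_injection_entry for that function in the
 * _error_injection_whitelist section delimited by the markers below.
 */
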
/* Markers of the _error_inject_whitelist section */
extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];

static void __init populate_kernel_ei_list(void)
{
	populate_error_injection_list(__start_error_injection_whitelist,
				      __stop_error_injection_whitelist,
				      NULL);
}

#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
	if (!mod->num_ei_funcs)
		return;

	populate_error_injection_list(mod->ei_funcs,
				      mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
	struct ei_entry *ent, *n;

	if (!mod->num_ei_funcs)
		return;

	mutex_lock(&ei_mutex);
	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
		if (ent->priv == mod) {
			list_del_init(&ent->list);
			kfree(ent);
		}
	}
	mutex_unlock(&ei_mutex);
}

/* Module notifier callback, checking the error injection table on the module */
static int ei_module_callback(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		module_load_ei_list(mod);
	else if (val == MODULE_STATE_GOING)
		module_unload_ei_list(mod);

	return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
	.notifier_call = ei_module_callback,
	.priority = 0
};

static __init int module_ei_init(void)
{
	return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init() (0)
#endif

/*
 * error_injection/list -- shows which functions can be overridden for
 * error injection.
 */
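
/*
 * Illustrative output (placeholder names; assuming debugfs is mounted at
 * /sys/kernel/debug, so the file is /sys/kernel/debug/error_injection/list):
 * one "<function>\t<error type>" line is printed per whitelisted symbol, e.g.
 *
 *	some_function	ERRNO
 *	another_function	NULL
 */
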
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ei_mutex);
	return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &error_injection_list, pos);
}

static const char *error_type_string(int etype)
{
	switch (etype) {
	case EI_ETYPE_NULL:
		return "NULL";
	case EI_ETYPE_ERRNO:
		return "ERRNO";
	case EI_ETYPE_ERRNO_NULL:
		return "ERRNO_NULL";
	case EI_ETYPE_TRUE:
		return "TRUE";
	default:
		return "(unknown)";
	}
}

static int ei_seq_show(struct seq_file *m, void *v)
{
	struct ei_entry *ent = list_entry(v, struct ei_entry, list);

	seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
		   error_type_string(ent->etype));
	return 0;
}

static const struct seq_operations ei_seq_ops = {
	.start = ei_seq_start,
	.next = ei_seq_next,
	.stop = ei_seq_stop,
	.show = ei_seq_show,
};

static int ei_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &ei_seq_ops);
}

static const struct file_operations debugfs_ei_ops = {
	.open = ei_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init ei_debugfs_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("error_injection", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

static int __init init_error_injection(void)
{
	populate_kernel_ei_list();

	if (!module_ei_init())
		ei_debugfs_init();

	return 0;
}
late_initcall(init_error_injection);