// SPDX-License-Identifier: GPL-2.0
// error-inject.c: Function-level error injection table
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);
struct ei_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
	int etype;
	void *priv;
};
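
/*
 * A minimal sketch of how entries end up on this list (the function name
 * is hypothetical, not part of this file): a function is marked safe for
 * error injection at its definition site with the ALLOW_ERROR_INJECTION()
 * macro, e.g.
 *
 *	int my_alloc_helper(struct foo *f)
 *	{
 *		...
 *	}
 *	ALLOW_ERROR_INJECTION(my_alloc_helper, ERRNO);
 *
 * The macro records a struct error_injection_entry for the function in the
 * _error_injection_whitelist section, which is scanned below at boot (and
 * at module load) to build error_injection_list.
 */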

bool within_error_injection_list(unsigned long addr)
{
	struct ei_entry *ent;
	bool ret = false;

	mutex_lock(&ei_mutex);
	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&ei_mutex);
	return ret;
}

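/*
 * The error type registered for the function containing @addr tells the
 * caller which injected return values make sense: EI_ETYPE_NULL for a
 * NULL pointer, EI_ETYPE_ERRNO for a -errno value, EI_ETYPE_ERRNO_NULL
 * for either, and EI_ETYPE_NONE when @addr is not on the whitelist.
 */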
int get_injectable_error_type(unsigned long addr)
{
	struct ei_entry *ent;

	list_for_each_entry(ent, &error_injection_list, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return ent->etype;
	}
	return EI_ETYPE_NONE;
}

/*
 * Lookup and populate the error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overridden with
 * error injection, so we need to populate the list of the symbols that have
 * been marked as safe for overriding.
 */
static void populate_error_injection_list(struct error_injection_entry *start,
					  struct error_injection_entry *end,
					  void *priv)
{
	struct error_injection_entry *iter;
	struct ei_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	mutex_lock(&ei_mutex);
	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)iter->addr);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find error inject entry at %p\n",
				(void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			break;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		ent->etype = iter->etype;
		ent->priv = priv;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &error_injection_list);
	}
	mutex_unlock(&ei_mutex);
}

/* Markers of the _error_injection_whitelist section */
extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];
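
/*
 * A rough sketch (assumed from the kernel's generic linker script, not
 * defined in this file) of how these markers are produced: the entries
 * emitted by ALLOW_ERROR_INJECTION() are gathered roughly as
 *
 *	__start_error_injection_whitelist = .;
 *	KEEP(*(_error_injection_whitelist))
 *	__stop_error_injection_whitelist = .;
 *
 * so the two symbols above bound an array of struct error_injection_entry
 * that populate_kernel_ei_list() walks at boot.
 */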

static void __init populate_kernel_ei_list(void)
{
	populate_error_injection_list(__start_error_injection_whitelist,
				      __stop_error_injection_whitelist,
				      NULL);
}

#ifdef CONFIG_MODULES
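/*
 * Assumed behaviour of the module loader (not implemented here):
 * mod->ei_funcs and mod->num_ei_funcs are filled in at load time from the
 * module's own _error_injection_whitelist section, so the module path
 * below sees the same ALLOW_ERROR_INJECTION() entries as the built-in
 * scan above.
 */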
static void module_load_ei_list(struct module *mod)
{
	if (!mod->num_ei_funcs)
		return;

	populate_error_injection_list(mod->ei_funcs,
				      mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
	struct ei_entry *ent, *n;

	if (!mod->num_ei_funcs)
		return;

	mutex_lock(&ei_mutex);
	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
		if (ent->priv == mod) {
			list_del_init(&ent->list);
			kfree(ent);
		}
	}
	mutex_unlock(&ei_mutex);
}

/* Module notifier callback, checking error injection table on the module */
static int ei_module_callback(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		module_load_ei_list(mod);
	else if (val == MODULE_STATE_GOING)
		module_unload_ei_list(mod);

	return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
	.notifier_call = ei_module_callback,
	.priority = 0
};

static __init int module_ei_init(void)
{
	return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init()	(0)
#endif

/*
 * error_injection/list -- shows which functions can be overridden for
 * error injection.
 */
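/*
 * Illustrative read of the file, assuming debugfs is mounted at
 * /sys/kernel/debug (the function names shown are made up):
 *
 *	# cat /sys/kernel/debug/error_injection/list
 *	my_alloc_helper	ERRNO
 *	my_lookup_helper	ERRNO_NULL
 *
 * Each line is "<function>\t<error type>", produced by ei_seq_show()
 * below using the "%ps" symbol format.
 */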
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ei_mutex);
	return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &error_injection_list, pos);
}

static const char *error_type_string(int etype)
{
	switch (etype) {
	case EI_ETYPE_NULL:
		return "NULL";
	case EI_ETYPE_ERRNO:
		return "ERRNO";
	case EI_ETYPE_ERRNO_NULL:
		return "ERRNO_NULL";
	default:
		return "(unknown)";
	}
}

static int ei_seq_show(struct seq_file *m, void *v)
{
	struct ei_entry *ent = list_entry(v, struct ei_entry, list);

	seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
		   error_type_string(ent->etype));
	return 0;
}

static const struct seq_operations ei_seq_ops = {
	.start = ei_seq_start,
	.next  = ei_seq_next,
	.stop  = ei_seq_stop,
	.show  = ei_seq_show,
};

static int ei_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &ei_seq_ops);
}

static const struct file_operations debugfs_ei_ops = {
	.open    = ei_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init ei_debugfs_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("error_injection", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

static int __init init_error_injection(void)
{
	populate_kernel_ei_list();

	if (!module_ei_init())
		ei_debugfs_init();

	return 0;
}
late_initcall(init_error_injection);