// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

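/*
 * klp_ops holds one entry per function that currently carries a livepatch
 * ftrace handler. Each entry stacks every klp_func that patches the same
 * original function (struct klp_ops is defined in patch.h).
 */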
static LIST_HEAD(klp_ops);

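/*
 * Find the klp_ops entry for a given original function. Checking the
 * first func on each entry's func_stack is enough: all funcs stacked on
 * one entry share the same old_func.
 */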
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

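/*
 * The ftrace handler installed on every patched function. It redirects
 * execution to the newest func on the func_stack, or, while a transition
 * is in progress, to whichever version matches the current task's
 * patch state.
 */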
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

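/*
 * Remove one func from its func_stack. When it is the last func on the
 * stack, the ftrace handler is unregistered and the klp_ops entry is
 * freed.
 */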
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

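/*
 * Redirect old_func to this func's new_func. The first patch of a given
 * function allocates the klp_ops entry and registers the ftrace handler;
 * subsequent patches just stack on top of the existing entry.
 */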
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

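/*
 * Unpatch every patched func in the object. With nops_only set, only the
 * dynamically added NOP funcs (used by atomic replace) are removed, and
 * the object stays marked as patched unless it is itself dynamic.
 */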
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

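/*
 * Patch every func in the object. On failure, any funcs already patched
 * are rolled back so the object is left fully unpatched.
 */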
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

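/*
 * Unpatch all patched objects of the patch. The _dynamic variant removes
 * only the NOP funcs that were added for atomic replace.
 */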
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}