// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

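/*
 * Find the klp_ops struct for a given original function address. Checking
 * only the first entry on each func_stack is enough because every patch
 * stacked in a given klp_ops replaces the same original function.
 */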
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

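/*
 * Ftrace handler attached to every patched function. It runs on each call
 * to the original function and redirects execution to the variant on top
 * of the func_stack, honoring any in-progress transition of the current
 * task.
 */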
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

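	/*
	 * Redirect execution: rewrite the instruction pointer saved by
	 * ftrace so that, on return from this handler, the task resumes
	 * in the new function instead of the original one.
	 */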
	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

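/*
 * Remove a patched function from its func_stack. If it is the last one,
 * the ftrace handler is unregistered and the klp_ops struct is freed.
 */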
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

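/*
 * Redirect an original function to its new implementation. The first patch
 * for a given function allocates a klp_ops struct and registers the ftrace
 * handler; later patches for the same function only stack on top of the
 * existing func_stack.
 */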
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

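		/*
		 * IPMODIFY marks this ops as changing the instruction
		 * pointer; without DYNAMIC_FTRACE_WITH_ARGS, that also
		 * requires SAVE_REGS so the full register set is available
		 * to rewrite. PERMANENT keeps the handler active even when
		 * the ftrace_enabled sysctl is cleared.
		 */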
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

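/*
 * Unpatch the functions of an object. With nops_only set, only NOP
 * functions (those that restore the original code) are removed; the
 * object stays patched unless it was dynamically allocated.
 */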
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

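/*
 * Patch every function of an object. On the first failure, all functions
 * patched so far are rolled back.
 */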
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

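/*
 * Unpatch all patched objects of a patch, optionally limited to NOP
 * functions.
 */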
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

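/*
 * Remove only the dynamically allocated NOP functions, which an
 * atomic-replace patch uses to revert functions from older patches.
 */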
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}