// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

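/*
 * One klp_ops is allocated per patched (original) function, e.g.:
 *
 *	klp_ops -> func_stack: patch2's klp_func -> patch1's klp_func
 *
 * The ftrace handler below runs the klp_func on top of the stack.
 */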
static LIST_HEAD(klp_ops);

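/* Find the klp_ops whose func_stack patches @old_func, or NULL if none. */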
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

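/*
 * Called by ftrace on entry to a patched function: pick the right
 * klp_func for the current task and redirect execution to it.
 */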
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

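	/*
	 * Protect against recursion: the trylock fails if this context is
	 * already running an ftrace handler, in which case do nothing.
	 */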
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

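	/* Redirect the saved instruction pointer to the new function. */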
	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

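/*
 * Remove @func from its func_stack. When it is the only patch left for
 * the function, the ftrace ops is unregistered and freed as well.
 */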
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

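/*
 * Hook @func into ftrace: reuse the klp_ops already registered for its
 * original function if one exists, otherwise allocate and register one.
 */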
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

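		/*
		 * SAVE_REGS lets the handler modify the saved pt_regs,
		 * IPMODIFY declares that it changes the instruction pointer
		 * (ftrace allows only one such user per function), and
		 * PERMANENT keeps the ops active even when function tracing
		 * is disabled via the ftrace_enabled sysctl.
		 */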
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

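/*
 * With @nops_only, only the dynamically allocated NOP functions (added
 * by an atomic-replace patch) are unpatched; other functions stay live.
 */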
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}