/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

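/*
 * List of klp_ops structures, one per function address currently patched.
 * Each klp_ops owns one ftrace_ops and a func_stack of klp_func structures,
 * most recently enabled first (a rough sketch, see struct klp_ops in
 * patch.h for the authoritative description):
 *
 *	klp_ops <-> klp_ops <-> ...
 *	   \
 *	    func_stack: newest klp_func -> older klp_func -> ...
 */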
static LIST_HEAD(klp_ops);

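/*
 * Find the klp_ops struct tracing the given function address. Comparing
 * against the first entry on the func_stack is sufficient because every
 * func on a given stack shares the same old_func.
 */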
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

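/*
 * Ftrace handler invoked in place of a patched function. It redirects
 * execution to the newest variant on ops->func_stack or, during a
 * transition, to the variant matching the current task's patch state.
 */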
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

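/*
 * Remove a func from its func_stack. When it is the last entry, also
 * unregister the ftrace ops so the original function runs again.
 */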
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

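/*
 * Redirect old_func to the new function. The first patch for a given
 * old_func allocates and registers a klp_ops; later patches just add
 * themselves on top of the existing func_stack.
 */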
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

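/*
 * Unpatch the funcs of an object. With nops_only, only the dynamically
 * allocated nop funcs are removed (done once an atomic replace patch no
 * longer needs them).
 */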
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

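/*
 * Patch all funcs of an object; on any failure the already-patched funcs
 * are rolled back.
 *
 * For illustration only, a sketch modeled on
 * samples/livepatch/livepatch-sample.c: a livepatch module typically feeds
 * this code by declaring something like
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * and calling klp_enable_patch(&patch) from its init function. (An object
 * whose name is left NULL targets vmlinux.)
 */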
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

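/*
 * Shared helper for the two unpatch flavours below: all patched objects,
 * or the nop funcs only.
 */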
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

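/* Remove only the dynamically allocated nop funcs of the patch. */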
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}