/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
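
/*
 * For example (a hypothetical scenario): if two enabled patches both replace
 * meminfo_proc_show(), a single klp_ops is registered with ftrace for that
 * old_addr and both klp_func structs sit on its func_stack; the func from
 * the most recently enabled patch is at the front of the list, so it is the
 * one the ftrace handler redirects to.
 */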

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of the module coming and going notifiers.
	 * Note that the patch might still be needed before the going handler
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify the semantics of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
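
/*
 * Example of the sympos convention handled above (the symbol name is
 * hypothetical): if "probe_init" appears twice in an object's kallsyms,
 * a lookup with sympos == 0 fails with the ambiguity error, while
 * sympos == 1 or sympos == 2 selects the first or second occurrence.
 */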

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/*
	 * Check if it's in another .o within the patch module. This also
	 * checks that the external symbol is unique.
	 */
	return klp_find_object_symbol(pmod->name, name, 0, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	unsigned long val;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		/* discover the address of the referenced symbol */
		if (reloc->external) {
			if (reloc->sympos > 0) {
				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
				       reloc->name);
				return -EINVAL;
			}
			ret = klp_find_external_symbol(pmod, reloc->name, &val);
		} else
			ret = klp_find_object_symbol(obj->name,
						     reloc->name,
						     reloc->sympos,
						     &val);
		if (ret)
			return ret;

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, val, ret);
			return ret;
		}
	}

	return 0;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
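
/*
 * Redirection sketch (klp_arch_set_pc() is arch-specific; on x86 it rewrites
 * regs->ip): a call to a patched function enters the ftrace trampoline at
 * the function's entry, the trampoline calls klp_ftrace_handler() with the
 * saved registers, and the handler points the saved instruction pointer at
 * the top klp_func's new_func.  When the trampoline restores the registers,
 * execution resumes in the replacement and the original body never runs.
 */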

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
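
/*
 * For example (the patch name is illustrative; it matches the name of the
 * patch module):
 *
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */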

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
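
/*
 * A minimal sketch of a patch module built on this API, modeled on
 * samples/livepatch/livepatch-sample.c (the patched function and all names
 * below are illustrative).  Leaving klp_object.name NULL targets vmlinux:
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/seq_file.h>
 *	#include <linux/livepatch.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *		WARN_ON(klp_disable_patch(&patch));
 *		WARN_ON(klp_unregister_patch(&patch));
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 *	MODULE_LICENSE("GPL");
 */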

static int klp_module_notify_coming(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret) {
		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
			pmod->name, mod->name, ret);
		return ret;
	}

	if (patch->state == KLP_DISABLED)
		return 0;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (ret)
		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
			pmod->name, mod->name, ret);
	return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	int ret;
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to record that this notifier has been called,
	 * because we never know which module a future patch will target.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				ret = klp_module_notify_coming(patch, obj);
				if (ret) {
					obj->mod = NULL;
					pr_warn("patch '%s' is in an inconsistent state!\n",
						patch->mod->name);
				}
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);