// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

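/*
 * The transition retry work is scheduled with a one second period, see
 * klp_try_complete_transition().  Once the number of unsuccessful attempts
 * reaches SIGNALS_TIMEOUT (and every SIGNALS_TIMEOUT attempts after that),
 * klp_send_signals() nudges the remaining blocking tasks.
 */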
#define SIGNALS_TIMEOUT 15

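/* The patch currently being applied or reverted; NULL outside a transition. */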
struct klp_patch *klp_transition_patch;

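/*
 * The state (KLP_PATCHED or KLP_UNPATCHED) that all tasks are being moved to;
 * KLP_UNDEFINED while no transition is in progress.
 */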
static int klp_target_state = KLP_UNDEFINED;

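/*
 * Number of unsuccessful klp_try_complete_transition() attempts since the
 * current transition started, used to pace klp_send_signals().
 */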
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu().  This requires synchronizing
 * even tasks that are in userspace or idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching of functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization.  Instead, hard-force the scheduler
 * synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_debug("'%s': completing %s transition\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

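        /*
         * A successfully applied atomic-replace patch obsoletes all the
         * patches it replaced: unpatch them and discard the no-op functions
         * that were used to override them.
         */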
        if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
                klp_unpatch_replaced_patches(klp_transition_patch);
                klp_discard_nops(klp_transition_patch);
        }

        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

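        /*
         * The transition is over, so the per-func transition flags can be
         * cleared; klp_ftrace_handler() no longer needs to consult the task
         * patch states for these functions.
         */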
        klp_for_each_object(klp_transition_patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = false;

        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
                klp_synchronize_transition();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;
                if (klp_target_state == KLP_PATCHED)
                        klp_post_patch_callback(obj);
                else if (klp_target_state == KLP_UNPATCHED)
                        klp_post_unpatch_callback(obj);
        }

        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;

        pr_debug("'%s': canceling patching transition, going to unpatch\n",
                 klp_transition_patch->mod->name);

        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barrier is in
         *    klp_init_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
                                unsigned int nr_entries)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        for (i = 0; i < nr_entries; i++) {
                address = entries[i];

                if (klp_target_state == KLP_UNPATCHED) {
                        /*
                         * Check for the to-be-unpatched function
                         * (the func itself).
                         */
                        func_addr = (unsigned long)func->new_func;
                        func_size = func->new_size;
                } else {
                        /*
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
                        ops = klp_find_ops(func->old_func);

                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
                                func_addr = (unsigned long)func->old_func;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
                                struct klp_func *prev;

                                prev = list_next_entry(func, stack_node);
                                func_addr = (unsigned long)prev->new_func;
                                func_size = prev->new_size;
                        }
                }

                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
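        /*
         * A single static buffer is safe here: all callers hold klp_mutex,
         * and keeping MAX_STACK_ENTRIES entries off the stack avoids a large
         * kernel stack frame.
         */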
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct klp_object *obj;
        struct klp_func *func;
        int ret, nr_entries;

        ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
        if (ret < 0) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }
        nr_entries = ret;

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
                                         __func__, task->comm, task->pid,
                                         func->old_name);
                                return ret;
                        }
                }
        }

        return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
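        /* Static for the same reason as entries[] in klp_check_stack(). */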
        static char err_buf[STACK_ERR_BUF_SIZE];
        struct rq *rq;
        struct rq_flags flags;
        int ret;
        bool success = false;

        err_buf[0] = '\0';

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * For arches which don't have reliable stack traces, we have to rely
         * on other methods (e.g., switching tasks at kernel exit).
         */
        if (!klp_have_reliable_stack())
                return false;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
        rq = task_rq_lock(task, &flags);

        if (task_running(rq, task) && task != current) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d is running\n", __func__, task->comm,
                         task->pid);
                goto done;
        }

        ret = klp_check_stack(task, err_buf);
        if (ret)
                goto done;

        success = true;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;

done:
        task_rq_unlock(rq, task, &flags);

        /*
         * Due to console deadlock issues, pr_debug() can't be used while
         * holding the task rq lock.  Instead we have to use a temporary buffer
         * and print the debug message after releasing the lock.
         */
        if (err_buf[0] != '\0')
                pr_debug("%s", err_buf);

        return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
        struct task_struct *g, *task;

        if (klp_signals_cnt == SIGNALS_TIMEOUT)
                pr_notice("signaling remaining tasks\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                if (!klp_patch_pending(task))
                        continue;

                /*
                 * There is a small race here.  We could see TIF_PATCH_PENDING
                 * set and decide to wake up a kthread or send a fake signal.
                 * Meanwhile the task could migrate itself and the action
                 * would be meaningless.  It is not serious though.
                 */
                if (task->flags & PF_KTHREAD) {
                        /*
                         * Wake up a kthread which sleeps interruptibly and
                         * still has not been migrated.
                         */
                        wake_up_state(task, TASK_INTERRUPTIBLE);
                } else {
                        /*
                         * Send a fake signal to all non-kthread tasks which
                         * are still not migrated.
                         */
                        spin_lock_irq(&task->sighand->siglock);
                        signal_wake_up(task, 0);
                        spin_unlock_irq(&task->sighand->siglock);
                }
        }
        read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        struct klp_patch *patch;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        get_online_cpus();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task))
                                complete = false;
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        put_online_cpus();

        if (!complete) {
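                /*
                 * Nudge the remaining blocking tasks every SIGNALS_TIMEOUT
                 * unsuccessful attempts: wake up sleeping kthreads and send
                 * a fake signal to user tasks.
                 */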
                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                        klp_send_signals();
                klp_signals_cnt++;

                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

        /* we're done, now cleanup the data structures */
        patch = klp_transition_patch;
        klp_complete_transition();

        /*
         * It would make more sense to free the unused patches in
         * klp_complete_transition(), but it is also called from
         * klp_cancel_transition().
         */
        if (!patch->enabled)
                klp_free_patch_async(patch);
        else if (patch->replace)
                klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        pr_notice("'%s': starting %s transition\n",
                  klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }

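        /* Restart the fake-signal countdown for the new transition. */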
        klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
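        /* KLP_UNPATCHED (0) and KLP_PATCHED (1) are complementary booleans. */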
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        pr_debug("'%s': initializing %s transition\n", patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_UNDEFINED.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
         * set a task->patch_state to KLP_UNDEFINED.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        pr_debug("'%s': reversing transition from %s\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
                                                   "unpatching to patching");

        klp_transition_patch->enabled = !klp_transition_patch->enabled;

        klp_target_state = !klp_target_state;

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() running in parallel with
         * klp_start_transition().
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /* Let any remaining calls to klp_update_patch_state() complete */
        klp_synchronize_transition();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        child->patch_state = current->patch_state;

        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING for all tasks on the administrator's request.  This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
        struct klp_patch *patch;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_warn("forcing remaining tasks to the patched state\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                klp_update_patch_state(task);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                klp_update_patch_state(idle_task(cpu));

        klp_for_each_patch(patch)
                patch->forced = true;
}