// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

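/* Prefix all pr_*() output from this file with "kexec: " (KBUILD_MODNAME). */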
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

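/*
 * Copy the userspace-supplied array of struct kexec_segment into the
 * kimage.  Returns 0 on success or -EFAULT if the user memory could
 * not be read.
 */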
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

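/*
 * Allocate a kimage for the kexec_load path: validate the entry point
 * (which must lie inside the reserved crash region for KEXEC_ON_CRASH
 * loads), copy in the user segment list, sanity-check it, and allocate
 * the control code page (plus a swap page for the non-crash case).
 * On success *rimage points at the new image; on failure everything
 * allocated so far is freed again.
 */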
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable the special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

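/*
 * Core of the kexec_load syscall: pick the destination slot (normal or
 * crash image), uninstall the old image when nr_segments == 0, otherwise
 * allocate a new image, let the architecture prepare it, copy the
 * vmcoreinfo data, load every segment, and finally install the new
 * image, freeing whatever was installed before.
 */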
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall the image */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so the vmcoreinfo data must be copied
	 * only after it has run.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces:
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */

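/*
 * Illustrative only: a minimal userspace sketch of driving this syscall
 * directly (in practice this is the job of the kexec-tools package).
 * image_buf, image_len and entry_addr below are placeholders supplied
 * by the caller, not a bootable configuration:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/kexec.h>
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,		// kernel image in user memory
 *		.bufsz = image_len,		// bytes valid at image_buf
 *		.mem   = (void *)0x1000000,	// placeholder load address
 *		.memsz = image_len,		// bytes reserved at .mem
 *	};
 *	long ret = syscall(SYS_kexec_load, entry_addr, 1UL, &seg,
 *			   KEXEC_ARCH_DEFAULT);
 */
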
static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE);
	if (result < 0)
		return result;

	/*
	 * kexec can be used to circumvent module loading restrictions, so
	 * prevent loading in that case.
	 */
	result = security_locked_down(LOCKDOWN_KEXEC);
	if (result)
		return result;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions: any set bit outside
	 * the architecture mask must be a flag known in KEXEC_FLAGS.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/*
	 * Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/*
	 * Because we write directly to the reserved memory region when
	 * loading crash kernels, we need a mutex here to prevent multiple
	 * crash kernels from attempting to load simultaneously, and to
	 * prevent a crash kernel from loading over the top of an in-use
	 * crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}

#ifdef CONFIG_COMPAT
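/*
 * 32-bit compat entry point: convert each compat_kexec_segment (32-bit
 * pointers and sizes) to a native struct kexec_segment in a userspace
 * scratch area, then hand off to the common do_kexec_load() path.
 */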
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/*
	 * Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/*
	 * Because we write directly to the reserved memory region when
	 * loading crash kernels, we need a mutex here to prevent multiple
	 * crash kernels from attempting to load simultaneously, and to
	 * prevent a crash kernel from loading over the top of an in-use
	 * crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
#endif