/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

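/*
 * Decide whether a dying context should trigger the loaded crash
 * kernel: an oops in interrupt context, in the idle task or in init,
 * or any oops with panic_on_oops set qualifies.
 */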
int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used when
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

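/*
 * Common allocation path: set up a zeroed kimage, copy in the
 * user-supplied segment list, and sanity check it for page
 * alignment, architecture limits, overlaps and buffer sizes.
 */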
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;

}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

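/*
 * Grab pages straight from the page allocator and mark them
 * reserved so the rest of the kernel leaves them alone while
 * kexec holds on to them.
 */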
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

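/*
 * Image list entries are physical addresses with the low bits used
 * as flags (IND_DESTINATION, IND_SOURCE, IND_INDIRECTION, IND_DONE).
 * The list grows by chaining indirection pages: when the current
 * page fills up, its last slot is pointed at a fresh page of entries.
 */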
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

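/*
 * Walk every entry in the image list, transparently following
 * indirection pages, until the IND_DONE terminator is reached.
 */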
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

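/*
 * Find the existing source entry whose destination is @page, so
 * kimage_alloc_page can swap source pages when a fresh allocation
 * happens to land on an address another page is bound for.
 */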
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page; if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

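/*
 * Copy a segment from user space into freshly allocated intermediate
 * pages; the assembly stub moves the data to its real destination at
 * reboot time.  Memory in the segment beyond bufsz remains zeroed.
 */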
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
					 << PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
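/*
 * Illustrative sketch only, not part of this file's logic: a minimal
 * user space invocation might look roughly like the following, where
 * kernel_buf, kernel_size and the 0x100000 load address are assumed
 * to have been prepared by the caller (in practice kexec-tools does
 * all of this work):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,
 *		.bufsz = kernel_size,
 *		.mem   = (void *)0x100000,
 *		.memsz = (kernel_size + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */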
Jeff Moyer | c330dda | 2006-06-23 02:05:07 -0700 | [diff] [blame] | 935 | struct kimage *kexec_image; |
| 936 | struct kimage *kexec_crash_image; |
Andrew Morton | 8c5a1cf | 2008-08-15 00:40:27 -0700 | [diff] [blame] | 937 | |
| 938 | static DEFINE_MUTEX(kexec_mutex); |
Eric W. Biederman | dc009d9 | 2005-06-25 14:57:52 -0700 | [diff] [blame] | 939 | |
Heiko Carstens | 754fe8d | 2009-01-14 14:14:09 +0100 | [diff] [blame] | 940 | SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, |
| 941 | struct kexec_segment __user *, segments, unsigned long, flags) |
Eric W. Biederman | dc009d9 | 2005-06-25 14:57:52 -0700 | [diff] [blame] | 942 | { |
| 943 | struct kimage **dest_image, *image; |
Eric W. Biederman | dc009d9 | 2005-06-25 14:57:52 -0700 | [diff] [blame] | 944 | int result; |
| 945 | |
| 946 | /* We only trust the superuser with rebooting the system. */ |
| 947 | if (!capable(CAP_SYS_BOOT)) |
| 948 | return -EPERM; |
| 949 | |
| 950 | /* |
| 951 | * Verify we have a legal set of flags |
| 952 | * This leaves us room for future extensions. |
| 953 | */ |
| 954 | if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)) |
| 955 | return -EINVAL; |
| 956 | |
| 957 | /* Verify we are on the appropriate architecture */ |
| 958 | if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && |
| 959 | ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) |
Eric W. Biederman | dc009d9 | 2005-06-25 14:57:52 -0700 | [diff] [blame] | 960 | return -EINVAL; |
Eric W. Biederman | dc009d9 | 2005-06-25 14:57:52 -0700 | [diff] [blame] | 961 | |
| 962 | /* Put an artificial cap on the number |
| 963 | * of segments passed to kexec_load. |
| 964 | */ |
| 965 | if (nr_segments > KEXEC_SEGMENT_MAX) |
| 966 | return -EINVAL; |
| 967 | |
| 968 | image = NULL; |
| 969 | result = 0; |
| 970 | |
| 971 | /* Because we write directly to the reserved memory |
| 972 | * region when loading crash kernels we need a mutex here to |
| 973 | * prevent multiple crash kernels from attempting to load |
| 974 | * simultaneously, and to prevent a crash kernel from loading |
| 975 | * over the top of a in use crash kernel. |
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						    nr_segments, segments);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
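
/*
 * Illustrative userspace call sequence (not part of this file; the
 * load address below is made up): kexec_load has no glibc wrapper, so
 * loaders such as kexec-tools invoke it through syscall(2):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,
 *		.bufsz = kernel_len,
 *		.mem   = (void *)0x100000,
 *		.memsz = (kernel_len + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
 */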

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				unsigned long nr_segments,
				struct compat_kexec_segment __user *segments,
				unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

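	/*
	 * Widen each 32-bit compat_kexec_segment into a native
	 * kexec_segment in a scratch area on the userspace stack, so
	 * the regular sys_kexec_load() can be reused unchanged.
	 */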
	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}
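
/*
 * Context note (not in the original file): this pairs with
 * crash_shrink_memory() below; the kexec_crash_size sysfs attribute in
 * kernel/ksysfs.c reads the reservation with the former and shrinks it
 * with the latter.
 */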

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
		free_page((unsigned long)__va(addr));
		totalram_pages++;
	}
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

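	/*
	 * Everything below 'end' stays reserved for the crash kernel;
	 * the tail [end, crashk_res.end] is released back to the page
	 * allocator and re-registered in the resource tree as
	 * "System RAM".
	 */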
	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

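	/*
	 * An ELF note is header + name (including the NUL) + descriptor
	 * data, each piece padded to a 4-byte boundary; the (x + 3)/4
	 * arithmetic below advances buf by that many u32 words.
	 */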
	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk(KERN_ERR "Kexec: Memory allocation for saving cpu "
				"register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)


/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 * crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
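/*
 * Example (illustrative values):
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when the system has at least 512M but less than 2G of
 * RAM, and 128M when it has 2G or more.
 */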
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
						"value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
						"after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 * crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
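/*
 * Example (illustrative values): "crashkernel=128M@16M" asks for a
 * 128M reservation starting at physical address 16M.
 */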
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warning("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * This function is the entry point for command line parsing and should
 * be called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
				crash_base);
}


static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
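
/*
 * Typical callers are the VMCOREINFO_* macros in <linux/kexec.h>; as a
 * rough sketch of their expansion (for illustration only),
 * VMCOREINFO_SYMBOL(_stext) becomes something like:
 *
 *	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", "_stext",
 *			      (unsigned long)&_stext);
 */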

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmlist);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vm_struct, addr);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

module_init(crash_save_vmcoreinfo_init)

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

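	/*
	 * With KEXEC_PRESERVE_CONTEXT the machine is quiesced much like
	 * a suspend-to-RAM transition, so the old kernel can be resumed
	 * after the jump; the Enable_irqs/Enable_cpus/... labels below
	 * unwind these steps in reverse order on the way back.
	 */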
#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kernel_restart_prepare(NULL);
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}