// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/proc/vmcore.c - Interface for accessing the crash dump from the
 * system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

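/*
 * Overview: /proc/vmcore exposes the previous kernel's memory as an ELF
 * core file laid out, in file-offset order, as:
 *
 *	[ ELF header + merged program headers ]	-> elfcorebuf
 *	[ ELF note segment (+ device dumps)   ]	-> elfnotes_buf
 *	[ memory chunks from PT_LOAD entries  ]	-> vmcore_list
 *
 * Both the read and mmap paths below walk these three regions in order.
 */
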
/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

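/* Device dump collection can be disabled with the "novmcoredd" boot parameter. */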
static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device dump size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

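/*
 * Example (sketch): a balloon driver can register a hook so that
 * ballooned-out pages are skipped while dumping. The names below are
 * illustrative, not from this file:
 *
 *	static int foo_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return foo_pfn_is_backed(pfn);	// hypothetical backing check
 *	}
 *	register_oldmem_pfn_is_ram(&foo_oldmem_pfn_is_ram);
 */
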
static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Read from the oldmem device at the given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

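/*
 * Example: a 12-byte read starting at *ppos = PAGE_SIZE - 4 is split into
 * two copies, 4 bytes from the end of the first page and 8 bytes from the
 * start of the next, after which *ppos has advanced to PAGE_SIZE + 8.
 */
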
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

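/*
 * Example: mapping four pages where only the third is not RAM results in
 * three remap_oldmem_pfn_range() calls: pages 1-2 as one contiguous region,
 * the zero page in place of page 3, and page 4 as the trailing rest.
 */
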
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

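/*
 * Example: a note with n_namesz = 5 ("CORE" plus NUL) and n_descsz = 336
 * contributes sizeof(Elf64_Nhdr) + 8 + 336 bytes to real_sz, since the
 * name and descriptor are each padded to a 4-byte boundary.
 */
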
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

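/*
 * Example: three PT_NOTE entries of 0x100, 0x200 and 0x80 bytes collapse
 * into a single PT_NOTE header with p_memsz = 0x380; the two now-unused
 * program header slots are shifted out and zeroed, and e_phnum drops by 2.
 */
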
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

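/*
 * Example (assuming 4 KiB pages): a PT_LOAD entry whose old-kernel physical
 * address (stored in p_offset) is 0x100f00 with p_memsz 0x300 yields a
 * page-aligned chunk covering [0x100000, 0x102000), and its exported
 * p_offset becomes vmcore_off + 0xf00 so readers still land on the
 * original bytes.
 */
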
static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf32_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills the beginning of the dump's buffer with the vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine the type of Elf header (Elf64 or Elf32) and update the elf note
 * size. Also update the offsets of all the program headers after the elf
 * note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * the Elf header
 * @dump_size: Size of the current device dump to be added to the total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an Elf note at the beginning of the buffer to indicate a vmcore
 * device dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep the size of the buffer page aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate a buffer for drivers to write their dumps into */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}