// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

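/*
 * Layout of the resulting /proc/vmcore file, as assembled below:
 *
 *   [ merged ELF header + program headers ]	elfcorebuf, elfcorebuf_sz
 *   [ ELF note segment: device dumps first,
 *     then the merged crash notes ]		elfnotes_buf, elfnotes_sz
 *   [ per-PT_LOAD old-memory chunks,
 *     page aligned ]				vmcore_list entries
 *
 * All offsets are assigned at init time by set_vmcore_list_offsets()
 * and process_ptload_program_headers_elf64()/_elf32().
 */
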
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DECLARE_RWSEM(vmcore_cb_rwsem);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	down_write(&vmcore_cb_rwsem);
	INIT_LIST_HEAD(&cb->next);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	up_write(&vmcore_cb_rwsem);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
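
/*
 * Minimal registration sketch (hypothetical driver; struct vmcore_cb is
 * declared in <linux/crash_dump.h>). Only ->pfn_is_ram is consulted by
 * pfn_is_ram() below; ->next is managed by register_vmcore_cb():
 *
 *	static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !my_device_owns_pfn(pfn);	// hypothetical helper
 *	}
 *
 *	static struct vmcore_cb my_vmcore_cb = {
 *		.pfn_is_ram = my_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&my_vmcore_cb);
 */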

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	down_write(&vmcore_cb_rwsem);
	list_del(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	up_write(&vmcore_cb_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

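/*
 * Report whether a pfn from the old kernel should be considered RAM.
 * Defaults to true; any registered callback may veto a pfn, in which
 * case the read/mmap paths below substitute zeroes for it.
 */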
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	lockdep_assert_held_read(&vmcore_cb_rwsem);

	list_for_each_entry(cb, &vmcore_cb_list, next) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	down_read(&vmcore_cb_rwsem);
	vmcore_opened = true;
	up_read(&vmcore_cb_rwsem);

	return 0;
}

/* Read from the oldmem device at the given offset, a page at a time. */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	down_read(&vmcore_cb_rwsem);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = 0;
			if (!userbuf)
				memset(buf, 0, nr_bytes);
			else if (clear_user(buf, nr_bytes))
				tmp = -EFAULT;
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);
		}
		if (tmp < 0) {
			up_read(&vmcore_cb_rwsem);
			return tmp;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	up_read(&vmcore_cb_rwsem);
	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret;

	/*
	 * Check if a vmcore callback was registered to avoid
	 * looping over all pages without a reason.
	 */
	down_read(&vmcore_cb_rwsem);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	up_read(&vmcore_cb_rwsem);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
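
/*
 * i.e. vmcore_size = elfcorebuf_sz + elfnotes_sz + the sum of all chunk
 * sizes on vmcore_list; recomputed by vmcoredd_update_size() whenever a
 * device dump is added.
 */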

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

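/*
 * The merge below works in three passes over the old program header table:
 * update_note_header_size_elf64() fixes up each PT_NOTE p_memsz to the
 * real, walked size of its note data; get_note_number_and_size_elf64()
 * counts the PT_NOTE entries and sums those sizes; copy_notes_elf64()
 * then copies all note data back-to-back into one page-aligned buffer,
 * after which the header table is collapsed into a single PT_NOTE entry.
 */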
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
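/*
 * Example with illustrative numbers: a PT_LOAD entry with p_offset
 * 0x10000200 and p_memsz 0x500 becomes the page-aligned chunk
 * [0x10000000, 0x10001000) on the vmcore list, and its exported
 * p_offset becomes vmcore_off + 0x200 so that the unaligned head
 * of the region stays addressable.
 */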
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
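
/*
 * Sketch of the note produced above (fields as used here; exact layout
 * follows struct vmcoredd_header):
 *
 *   +--------------------------------+ 0
 *   | n_namesz / n_descsz / n_type   |  NT_VMCOREDD note header
 *   | name = VMCOREDD_NOTE_NAME      |
 *   | dump_name from the driver      |  covered by n_descsz
 *   +--------------------------------+ sizeof(struct vmcoredd_header)
 *   | device dump data (@size bytes) |  also covered by n_descsz
 *   +--------------------------------+
 */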

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
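
/*
 * Minimal caller sketch (hypothetical driver; struct vmcoredd_data is
 * declared in <linux/crash_dump.h>):
 *
 *	static int my_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// fill at most data->size bytes of device state into buf
 *		return my_device_snapshot(buf, data->size);	// hypothetical
 *	}
 *
 *	static struct vmcoredd_data my_dump_data = {
 *		.size = MY_DUMP_SIZE,			// hypothetical size
 *		.vmcoredd_callback = my_collect,
 *	};
 *
 *	strscpy(my_dump_data.dump_name, "my_device",
 *		sizeof(my_dump_data.dump_name));
 *	vmcore_add_device_dump(&my_dump_data);
 */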
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}