// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
12
Omar Sandoval23c85092018-08-21 21:55:20 -070013#include <linux/crash_core.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/mm.h>
15#include <linux/proc_fs.h>
David Howells2f96b8c2013-04-12 00:10:25 +010016#include <linux/kcore.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/user.h>
Randy Dunlap16f7e0f2006-01-11 12:17:46 -080018#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/elf.h>
20#include <linux/elfcore.h>
Andrew Morton3c743a72013-04-29 15:08:08 -070021#include <linux/notifier.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/vmalloc.h>
23#include <linux/highmem.h>
Andrew Morton87ebdc02013-02-27 17:03:16 -080024#include <linux/printk.h>
Mike Rapoport57c8a662018-10-30 15:09:49 -070025#include <linux/memblock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/init.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080028#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <asm/io.h>
KAMEZAWA Hiroyuki2ef43ec2009-09-22 16:45:41 -070030#include <linux/list.h>
KAMEZAWA Hiroyuki3089aa12009-09-22 16:45:48 -070031#include <linux/ioport.h>
KAMEZAWA Hiroyuki3089aa12009-09-22 16:45:48 -070032#include <linux/memory.h>
Ingo Molnar29930022017-02-08 18:51:36 +010033#include <linux/sched/task.h>
David Howells02e935b2019-08-19 17:17:57 -070034#include <linux/security.h>
KAMEZAWA Hiroyuki94925872009-09-22 16:45:45 -070035#include <asm/sections.h>
David Howells59d80532013-04-11 13:34:43 +010036#include "internal.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
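
/*
 * Illustrative sketch (not part of the original file): with the default
 * macros above, file offsets in the data area and direct-map virtual
 * addresses translate linearly through PAGE_OFFSET. Assuming, purely for
 * the example, PAGE_OFFSET == 0xffff888000000000 (the x86-64 direct-map
 * base without KASLR):
 *
 *	kc_vaddr_to_offset(0xffff888000100000) == 0x100000
 *	kc_offset_to_vaddr(0x100000) == 0xffff888000100000
 *
 * Architectures where this linear relationship does not hold supply their
 * own definitions, which is why the defaults are guarded by #ifndef.
 */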

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * Same semantics as oldmem_pfn_is_ram in vmcore.
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

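/*
 * A minimal usage sketch (hypothetical, not an in-tree user): a memory
 * driver whose pages must not be dumped as RAM could install a filter at
 * init time. The mydrv_* names below are invented for illustration.
 *
 *	static int mydrv_pfn_is_ram(unsigned long pfn)
 *	{
 *		return mydrv_pfn_is_ballooned(pfn) ? 0 : 1;
 *	}
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return register_mem_pfn_is_ram(mydrv_pfn_is_ram);
 *	}
 *
 * Only one callback can be installed; a second call fails with -EBUSY,
 * and with no callback installed every page is treated as RAM.
 */
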
/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

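/*
 * For example, proc_kcore_init() at the bottom of this file registers the
 * vmalloc range this way:
 *
 *	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 *		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
 */
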
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

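/*
 * The size computed above implies the overall layout of /proc/kcore,
 * sketched here from the offsets used in read_kcore() below:
 *
 *	0:		ELF file header (struct elfhdr)
 *	phdrs_offset:	*nphdr program headers (*phdrs_len bytes)
 *	notes_offset:	PT_NOTE segment (*notes_len bytes)
 *	*data_offset:	page-aligned start of the dumped memory contents,
 *			each region placed at kc_vaddr_to_offset() of its
 *			start address
 */
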
#ifdef CONFIG_HIGHMEM
/*
 * With HIGHMEM, part of memory is _invisible_ from (i.e. not directly
 * mapped by) the kernel, so only register the directly-mapped low memory.
 * [0...max_low_pfn) can be assumed to be one continuous range because the
 * memory holes there are not as big as in the !HIGHMEM case.
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to align to page boundaries) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);
	if (!memmap_valid_within(pfn, p, page_zone(p)))
		return 1;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* Cut off any not-mapped area (check originally from ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid(), so we know this address
	 * is a valid pointer; therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized yet: find the highest pfn across all memory nodes. */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

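/*
 * Each record emitted by append_kcore_note() follows the standard ELF
 * note layout, with name and descriptor padded to 4-byte boundaries:
 *
 *	+------------------------------------------------+
 *	| n_namesz | n_descsz | n_type  (struct elf_note) |
 *	+------------------------------------------------+
 *	| name, n_namesz bytes incl. NUL, padded to 4     |
 *	+------------------------------------------------+
 *	| desc, n_descsz bytes, padded to 4               |
 *	+------------------------------------------------+
 */
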
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			if (m->type == KCORE_REMAP)
				phdr->p_vaddr = (size_t)m->vaddr;
			else
				phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

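	/*
	 * With the default kc_*() macros, the p_offset computed above means
	 * that a reader wanting the bytes at virtual address V inside a
	 * non-remapped region m seeks to
	 *
	 *	p_offset + (V - p_vaddr)
	 *	    == (m->addr - PAGE_OFFSET) + data_offset + (V - m->addr)
	 *	    == kc_vaddr_to_offset(V) + data_offset,
	 *
	 * which is exactly what the data-read loop below inverts via
	 * kc_offset_to_vaddr(*fpos - data_offset).
	 */
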
	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			list_for_each_entry(m, &kclist_head, list) {
				if (start >= m->addr &&
				    start < m->addr + m->size)
					break;
			}
		}

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			m = NULL;	/* skip the list anchor */
		} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* We have to zero-fill the user buffer even when nothing could be read. */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_USER) {
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (kern_addr_valid(start)) {
				/*
				 * Using a bounce buffer to bypass the
				 * hardened-usercopy kernel text checks.
				 */
				if (copy_from_kernel_nofault(buf, (void *)start,
						tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

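/*
 * Typical consumption from userspace (a sketch, not part of this file):
 * the file is a readable ELF core, so a debugger can inspect live kernel
 * memory given CAP_SYS_RAWIO and a vmlinux with debug info that matches
 * the running kernel (the path below is illustrative):
 *
 *	# gdb /path/to/vmlinux /proc/kcore
 *	(gdb) p init_task.comm
 */
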
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping kernel text instead of
 * the direct-map area. We need to create a special TEXT section for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The module area does not intersect the vmalloc area, so register it as
 * its own entry when it lies outside [VMALLOC_START, VMALLOC_END).
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);