// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c		kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
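
/*
 * Typical usage (illustrative only; the debug-info vmlinux path varies by
 * distribution) is to inspect live kernel memory with gdb:
 *
 *	# gdb /usr/lib/debug/vmlinux /proc/kcore
 *	(gdb) print init_task.comm
 *
 * The file contents are generated on the fly from the regions registered
 * on kclist_head below.
 */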

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

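/*
 * Translate a kernel virtual address to an offset within the dumped data
 * area and back. The defaults below assume the linear direct mapping that
 * starts at PAGE_OFFSET; architectures with a different layout supply
 * their own definitions.
 */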
#ifndef	kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

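/*
 * Compute the sizes of the pieces of the virtual core file. The layout is
 * the ELF header, then *nphdr program headers, then the note segment, and
 * finally, page-aligned at *data_offset, the memory contents. The return
 * value is the total file size: *data_offset plus the largest offset
 * spanned by any registered region.
 */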
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM, only the directly mapped range [0...max_low_pfn)
 * is dumped, and it can be assumed contiguous: any memory holes in low
 * memory are not as big as in the !HIGHMEM case. (HIGHMEM is special
 * because part of memory is _invisible_ to the kernel's direct mapping.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Calculate the vmemmap range for the given System RAM pfns and register it. */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we page-align the ranges) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif

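/*
 * Callback for walk_system_ram_range(): register one System RAM range
 * (and, with SPARSEMEM_VMEMMAP, the vmemmap range that backs it) on the
 * list passed via @arg. A nonzero return value stops the walk.
 */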
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);
	if (!memmap_valid_within(pfn, p, page_zone(p)))
		return 1;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* Trim any area that is not mapped (taken from the ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid, so we know this address
	 * is a valid pointer; therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Find the highest pfn of any node that has memory. */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;

		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* Scan pfns 0 through end_pfn for System RAM. */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

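/*
 * Rebuild the KCORE_RAM/KCORE_VMEMMAP entries of kclist_head. A fresh
 * list is built first and swapped in under kclist_lock, so readers never
 * observe a half-updated list; the replaced entries are freed only after
 * the lock has been dropped.
 */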
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

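/*
 * Append one ELF note to the notes buffer at offset *i and advance *i.
 * Each note is an elf_note header followed by the NUL-terminated name and
 * the descriptor, each padded to 4-byte alignment:
 *
 *	| n_namesz | n_descsz | n_type | name\0 pad | desc pad |
 */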
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

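/*
 * Serve a read from anywhere in the virtual core file. The header parts
 * (ELF header, program headers, note segment) are regenerated on the fly
 * for the slice being read; offsets past data_offset are satisfied
 * directly from kernel memory, at most one page per loop iteration.
 */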
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			if (m->type == KCORE_REMAP)
				phdr->p_vaddr = (size_t)m->vaddr;
			else
				phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * The rest of the file holds memory contents: translate the file
	 * offset back to a kernel virtual address and clamp this read to
	 * the end of the page it starts in.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

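	/*
	 * Copy out the memory contents, at most one page at a time, picking
	 * an access method that is safe for each region type.
	 */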
	m = NULL;
	while (buflen) {
		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			list_for_each_entry(m, &kclist_head, list) {
				if (start >= m->addr &&
				    start < m->addr + m->size)
					break;
			}
		}

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			m = NULL;	/* skip the list anchor */
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* vread() zero-fills holes, so the user buffer is always fully written */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_USER) {
			/* User pages are handled before normal kernel pages: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (kern_addr_valid(start)) {
				/*
				 * Use the bounce buffer to bypass the
				 * hardened-usercopy kernel text checks.
				 */
				if (probe_kernel_read(buf, (void *) start, tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

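/*
 * Reading raw kernel memory is effectively full system access, so opening
 * requires CAP_SYS_RAWIO. The page-sized bounce buffer allocated here
 * backs the vread()/probe_kernel_read() paths in read_kcore().
 */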
static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.release	= release_kcore,
	.llseek		= default_llseek,
};

/* Just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If the architecture maps kernel text outside of the direct-map area, a
 * dedicated KCORE_TEXT segment is created for it instead.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The module area has no intersection with the vmalloc area.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

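/*
 * Create /proc/kcore (readable by root only) and register the initial
 * regions: kernel text where the architecture maps it specially, the
 * vmalloc and module areas, and the direct-mapped RAM. The hotplug
 * notifier marks the RAM list stale whenever memory goes on- or offline.
 */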
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);