blob: cf956edf05eff409380e9eaa4311c4685aee5ce8 [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * linux/fs/binfmt_elf.c
4 *
5 * These are the functions used to load ELF format executables as used
6 * on SVr4 machines. Information on the format may be found in the book
7 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
8 * Tools".
9 *
10 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/fs.h>
Chris Kennellyce81bb22020-10-15 20:12:32 -070016#include <linux/log2.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/mm.h>
18#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/errno.h>
20#include <linux/signal.h>
21#include <linux/binfmts.h>
22#include <linux/string.h>
23#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/personality.h>
26#include <linux/elfcore.h>
27#include <linux/init.h>
28#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/compiler.h>
30#include <linux/highmem.h>
Anshuman Khandual03911132020-04-06 20:03:51 -070031#include <linux/hugetlb.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070033#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070036#include <linux/elf.h>
Kees Cookd1fd8362015-04-14 15:48:07 -070037#include <linux/elf-randomize.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070038#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080039#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010040#include <linux/sched.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +010041#include <linux/sched/coredump.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010042#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010043#include <linux/sched/cputime.h>
Dave Martin00e19ce2020-03-16 16:50:44 +000044#include <linux/sizes.h>
45#include <linux/types.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +010046#include <linux/cred.h>
Ross Zwisler50378352015-10-05 16:33:36 -060047#include <linux/dax.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/param.h>
50#include <asm/page.h>
51
Dave Martin00e19ce2020-03-16 16:50:44 +000052#ifndef ELF_COMPAT
53#define ELF_COMPAT 0
54#endif
55
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070056#ifndef user_long_t
57#define user_long_t long
58#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070059#ifndef user_siginfo_t
60#define user_siginfo_t siginfo_t
61#endif
62
Nicolas Pitre47552002017-08-16 16:05:13 -040063/* That's for binfmt_elf_fdpic to deal with */
64#ifndef elf_check_fdpic
65#define elf_check_fdpic(ex) false
66#endif
67
Al Viro71613c32012-10-20 22:00:48 -040068static int load_elf_binary(struct linux_binprm *bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
Josh Triplett69369a72014-04-03 14:48:27 -070070#ifdef CONFIG_USELIB
71static int load_elf_library(struct file *);
72#else
73#define load_elf_library NULL
74#endif
75
Linus Torvalds1da177e2005-04-16 15:20:36 -070076/*
77 * If we don't support core dumping, then supply a NULL so we
78 * don't even try.
79 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080080#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080081static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082#else
83#define elf_core_dump NULL
84#endif
85
86#if ELF_EXEC_PAGESIZE > PAGE_SIZE
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070087#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070088#else
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070089#define ELF_MIN_ALIGN PAGE_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070090#endif
91
92#ifndef ELF_CORE_EFLAGS
93#define ELF_CORE_EFLAGS 0
94#endif
95
96#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
97#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
98#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
99
/*
 * Descriptor wiring the ELF loader entry points into the binfmt layer:
 * executable loading, shared-library loading (a.out-style uselib), and
 * core dumping.  NOTE(review): presumably passed to register_binfmt()
 * at module init — registration is not visible in this chunk; confirm.
 */
static struct linux_binfmt elf_format = {
	.module = THIS_MODULE,
	.load_binary = load_elf_binary,
	.load_shlib = load_elf_library,
	.core_dump = elf_core_dump,
	.min_coredump = ELF_EXEC_PAGESIZE,
};
107
Alexey Dobriyan18676ff2020-01-30 22:17:01 -0800108#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800110static int set_brk(unsigned long start, unsigned long end, int prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111{
112 start = ELF_PAGEALIGN(start);
113 end = ELF_PAGEALIGN(end);
114 if (end > start) {
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800115 /*
116 * Map the last of the bss segment.
117 * If the header is requesting these pages to be
118 * executable, honour that (ppc32 needs this).
119 */
120 int error = vm_brk_flags(start, end - start,
121 prot & PROT_EXEC ? VM_EXEC : 0);
Linus Torvalds5d22fc22016-05-27 15:57:31 -0700122 if (error)
123 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124 }
125 current->mm->start_brk = current->mm->brk = end;
126 return 0;
127}
128
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129/* We need to explicitly zero any fractional pages
130 after the data section (i.e. bss). This would
131 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700132 be in memory
133 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134static int padzero(unsigned long elf_bss)
135{
136 unsigned long nbyte;
137
138 nbyte = ELF_PAGEOFFSET(elf_bss);
139 if (nbyte) {
140 nbyte = ELF_MIN_ALIGN - nbyte;
141 if (clear_user((void __user *) elf_bss, nbyte))
142 return -EFAULT;
143 }
144 return 0;
145}
146
Ohad Ben-Cohen09c6dd32008-02-03 18:05:15 +0200147/* Let's use some macros to make this stack manipulation a little clearer */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148#ifdef CONFIG_STACK_GROWSUP
149#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
150#define STACK_ROUND(sp, items) \
151 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700152#define STACK_ALLOC(sp, len) ({ \
153 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
154 old_sp; })
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155#else
156#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
157#define STACK_ROUND(sp, items) \
158 (((unsigned long) (sp - items)) &~ 15UL)
159#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
160#endif
161
Nathan Lynch483fad12008-07-22 04:48:46 +1000162#ifndef ELF_BASE_PLATFORM
163/*
164 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
165 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
166 * will be copied to the user stack in the same manner as AT_PLATFORM.
167 */
168#define ELF_BASE_PLATFORM NULL
169#endif
170
/*
 * create_elf_tables() - lay out the initial userspace stack for a new
 * ELF image: platform capability strings, 16 bytes of AT_RANDOM seed,
 * the ELF auxiliary vector, and the argc/argv/envp arrays.
 *
 * @bprm:             binprm whose argument/environment strings were already
 *                    copied onto the new stack (bprm->p is the current top)
 * @exec:             ELF header of the binary being executed
 * @load_addr:        load bias of the binary, used to compute AT_PHDR
 * @interp_load_addr: address the interpreter was mapped at (AT_BASE)
 * @e_entry:          entry point advertised via AT_ENTRY
 *
 * Returns 0 on success, -EFAULT on a failed user-space copy, or -EINVAL
 * if an argument/environment string is empty or overlong.
 */
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr,
		unsigned long e_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/*
	 * Create the ELF interpreter info.  The auxv is staged in
	 * mm->saved_auxv first and copied to the user stack at the end.
	 */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->have_execfd) {
		NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	/* argv pointers + NULL, envp pointers + NULL, plus argc itself */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
356
/*
 * Map one PT_LOAD segment of @filep into the current mm.
 *
 * @addr is rounded down to an ELF page boundary and the file offset is
 * adjusted correspondingly, so sub-page p_vaddr alignment is preserved.
 * When @total_size is non-zero (the first mapping of an interpreter
 * image), the whole image extent is mapped first and the excess beyond
 * this segment is unmapped again — see the comment below.
 *
 * Returns the mapped address, or an mmap error value that callers test
 * with BAD_ADDR().
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	* total_size is the size of the ELF (interpreter) image.
	* The _first_ mmap needs to know the full size, otherwise
	* randomization might put this image into an overlapping
	* position with the ELF binary image. (since size < total_size)
	* So we first map the 'big' image - and unmap the remainder at
	* the end. (which unmap is needed for ELF images with holes.)
	*/
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	/* With MAP_FIXED_NOREPLACE an -EEXIST means something already
	 * lives at the requested address; log it for diagnosis. */
	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}
395
Alexey Dobriyan49ac9812019-03-07 16:29:03 -0800396static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
Jiri Kosinacc503c12008-01-30 13:31:07 +0100397{
398 int i, first_idx = -1, last_idx = -1;
399
400 for (i = 0; i < nr; i++) {
401 if (cmds[i].p_type == PT_LOAD) {
402 last_idx = i;
403 if (first_idx == -1)
404 first_idx = i;
405 }
406 }
407 if (first_idx == -1)
408 return 0;
409
410 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
411 ELF_PAGESTART(cmds[first_idx].p_vaddr);
412}
413
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800414static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
415{
416 ssize_t rv;
417
418 rv = kernel_read(file, buf, len, &pos);
419 if (unlikely(rv != len)) {
420 return (rv < 0) ? rv : -EIO;
421 }
422 return 0;
423}
424
Chris Kennellyce81bb22020-10-15 20:12:32 -0700425static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
426{
427 unsigned long alignment = 0;
428 int i;
429
430 for (i = 0; i < nr; i++) {
431 if (cmds[i].p_type == PT_LOAD) {
432 unsigned long p_align = cmds[i].p_align;
433
434 /* skip non-power of two alignments as invalid */
435 if (!is_power_of_2(p_align))
436 continue;
437 alignment = max(alignment, p_align);
438 }
439 }
440
441 /* ensure we align to at least one page */
442 return ELF_PAGEALIGN(alignment);
443}
444
/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure
 * (the partially filled array is freed on any error path below).
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
	if (retval < 0) {
		err = retval;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100494
Paul Burton774c1052014-09-11 08:30:16 +0100495#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
496
497/**
498 * struct arch_elf_state - arch-specific ELF loading state
499 *
500 * This structure is used to preserve architecture specific data during
501 * the loading of an ELF file, throughout the checking of architecture
502 * specific ELF headers & through to the point where the ELF load is
503 * known to be proceeding (ie. SET_PERSONALITY).
504 *
505 * This implementation is a dummy for architectures which require no
506 * specific state.
507 */
508struct arch_elf_state {
509};
510
511#define INIT_ARCH_ELF_STATE {}
512
/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr: The main ELF header
 * @phdr: The program header to check
 * @elf: The open ELF file
 * @is_interp: True if the phdr is from the interpreter of the ELF being
 *             loaded, else false.
 * @state: Architecture-specific state preserved throughout the process
 *         of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * This is the no-op fallback used when the architecture does not select
 * CONFIG_ARCH_BINFMT_ELF_STATE.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
539
/**
 * arch_check_elf() - check an ELF executable
 * @ehdr: The main ELF header
 * @has_interp: True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state: Architecture-specific state preserved throughout the process
 *         of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * This is the no-op fallback used when the architecture does not select
 * CONFIG_ARCH_BINFMT_ELF_STATE.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
562
563#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564
Dave Martinfe0f6762020-03-16 16:50:46 +0000565static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
566 bool has_interp, bool is_interp)
Alexey Dobriyand8e7cb32019-05-14 15:43:51 -0700567{
568 int prot = 0;
569
570 if (p_flags & PF_R)
571 prot |= PROT_READ;
572 if (p_flags & PF_W)
573 prot |= PROT_WRITE;
574 if (p_flags & PF_X)
575 prot |= PROT_EXEC;
Dave Martinfe0f6762020-03-16 16:50:46 +0000576
577 return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
Alexey Dobriyand8e7cb32019-05-14 15:43:51 -0700578}
579
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580/* This is much more generalized than the library routine read function,
581 so we keep this separate. Technically the library read function
582 is only provided so that we can read a.out libraries that have
583 an ELF header */
584
/*
 * Map the ELF interpreter (e.g. the dynamic linker) described by
 * @interp_elf_ex / @interp_elf_phdata into the current mm and set up
 * its bss.
 *
 * @no_base: when non-zero and the interpreter is ET_DYN, bias the first
 *           mapping so the kernel chooses the base address itself.
 *
 * Returns the interpreter's load bias on success; on failure returns a
 * value for which BAD_ADDR() is true or a negative errno cast to
 * unsigned long (initialised to ~0UL).
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
		struct arch_elf_state *arch_state)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags, arch_state,
						 true, true);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			/* total_size is only passed to the first mapping so
			 * the full image extent is reserved up front. */
			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
706
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707/*
708 * These are the functions used to load ELF style executables and shared
709 * libraries. There is no binary dependent code anywhere else.
710 */
711
/*
 * Parse one gnu_property entry out of a PT_GNU_PROPERTY note descriptor
 * and hand it to the architecture hook.
 *
 * @data:   base of the note contents buffer
 * @off:    in/out: byte offset into @data of the next property; advanced
 *          past this property (header + padded payload) on success
 * @datasz: total number of valid bytes in @data
 * @arch:   arch-specific ELF loader state updated by the hook
 * @have_prev_type / @prev_type: used to enforce that properties appear
 *          unique and sorted by pr_type across successive calls
 *
 * Returns 0 on success, -ENOENT when @*off already sits exactly at the
 * end of the data (the caller's normal loop-termination signal), -EIO on
 * a caller bug (offset out of range or misaligned), -ENOEXEC for a
 * malformed note, or the arch hook's error.
 *
 * NOTE: the bounds checks below subtract from @datasz step by step so
 * that every comparison is against the bytes actually remaining; the
 * statement order is what keeps them free of unsigned wraparound.
 */
static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	/* Exactly at the end: clean termination, not an error. */
	if (*off == datasz)
		return -ENOENT;

	/* Past the end or misaligned can only be a caller bug. */
	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;		/* datasz now counts bytes remaining from o */

	/* Must have room for a full property header... */
	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	/* ...and for the payload it claims. */
	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	/* step = payload rounded up to the next property's alignment. */
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	/* Advance past header + padded payload for the next iteration. */
	*off = o + step;
	return 0;
}
755
756#define NOTE_DATA_SZ SZ_1K
757#define GNU_PROPERTY_TYPE_0_NAME "GNU"
758#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
759
/*
 * Read and validate a PT_GNU_PROPERTY note (NT_GNU_PROPERTY_TYPE_0) from
 * @f and feed each contained property to parse_elf_property().
 *
 * @f:    the ELF file the properties apply to (the interpreter if one is
 *        in use, otherwise the binary itself — see the caller)
 * @phdr: the PT_GNU_PROPERTY program header, or NULL if the file has none
 * @arch: arch-specific loader state filled in by the per-property hook
 *
 * Returns 0 on success or when there is nothing to do (no phdr, or the
 * arch doesn't use GNU properties), -ENOEXEC for a malformed or
 * oversized note, -EIO for a short/failed read, or an arch hook error.
 */
static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	/* The whole note is read into this fixed-size on-stack buffer. */
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	/* We need at least the note header plus the "GNU\0" name. */
	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	/* Only NT_GNU_PROPERTY_TYPE_0 notes named "GNU" are meaningful. */
	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	/* Descriptor starts aligned after the header + name. */
	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	/* Descriptor must lie entirely within the bytes actually read. */
	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	/* Walk the properties until exhaustion (-ENOENT) or error. */
	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	/* -ENOENT just means we consumed everything cleanly. */
	return ret == -ENOENT ? 0 : ret;
}
816
/*
 * load_elf_binary() - the binfmt handler that loads an ELF executable.
 *
 * Validates the ELF header already read into bprm->buf, loads the
 * optional PT_INTERP interpreter, maps all PT_LOAD segments, sets up the
 * bss/brk, builds the ELF auxv/argv tables and finally starts the new
 * thread at the computed entry point.
 *
 * Returns 0 on success (the new image is committed and will run on
 * return to userspace) or a negative errno.  Note that once
 * begin_new_exec() has succeeded, failure is fatal to the task.
 *
 * Cleanup is via the goto labels at the bottom: out_free_dentry releases
 * interpreter state, out_free_ph the program header buffer.
 */
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	/* The first bytes of the file were already read into bprm->buf. */
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	/* FDPIC ELF is handled by a separate binfmt, reject it here. */
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	/*
	 * First pass over the program headers: remember PT_GNU_PROPERTY
	 * and open/read the PT_INTERP interpreter if there is one.
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		/* p_filesz includes the NUL; at least one char plus NUL. */
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_ph;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		/* There can be only one PT_INTERP; stop scanning. */
		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	/*
	 * Second pass: stack executability (PT_GNU_STACK) and
	 * arch-specific program headers of the main binary.
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		/* The interpreter's properties override the binary's. */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	/* Point of no return: failures below kill the task. */
	retval = begin_new_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;
		unsigned long alignment;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
				     !!interpreter, false);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				/* Honor a segment-requested p_align. */
				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
				if (alignment)
					load_bias &= ~(alignment - 1);
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex->e_type == ET_DYN) {
				/* Fold in where the first map really went. */
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			/* bss inherits the last segment's protections. */
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	/* Translate all the tracked addresses into the biased layout. */
	e_entry = elf_ex->e_entry + load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata,
					    &arch_state);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);

		/* Interpreter state is done with on success too. */
		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex,
			  load_addr, interp_load_addr, e_entry);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
1323
Josh Triplett69369a72014-04-03 14:48:27 -07001324#ifdef CONFIG_USELIB
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325/* This is really simpleminded and specialized - we are loading an
1326 a.out library that is given an ELF header. */
/*
 * Map a classic SVr4-style shared library into the current process, for
 * the legacy uselib(2) path (compiled under CONFIG_USELIB).  Only an
 * ET_EXEC image with at most two program headers, exactly one of which
 * is PT_LOAD, is accepted.  Returns 0 on success or a negative errno.
 */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	/* Require exactly one PT_LOAD segment among the (<= 2) phdrs. */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	/* Advance eppnt to that single PT_LOAD entry. */
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	/* vm_mmap() returns the mapped address; anything else is an error. */
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page (start of bss). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	/* Extend with anonymous pages for any bss beyond the file data. */
	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
Josh Triplett69369a72014-04-03 14:48:27 -07001407#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001409#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410/*
1411 * ELF core dumper
1412 *
1413 * Modelled on fs/exec.c:aout_core_dump()
1414 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1415 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
1417/*
Jason Baron909af762012-03-23 15:02:51 -07001418 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1419 * that are useful for post-mortem analysis are included in every core dump.
1420 * In that way we ensure that the core dump is fully interpretable later
1421 * without matching up the same kernel and hardware config to see what PC values
1422 * meant. These special mappings include - vDSO, vsyscall, and other
1423 * architecture specific mappings
1424 */
1425static bool always_dump_vma(struct vm_area_struct *vma)
1426{
1427 /* Any vsyscall mappings? */
1428 if (vma == get_gate_vma(vma->vm_mm))
1429 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001430
1431 /*
1432 * Assume that all vmas with a .name op should always be dumped.
1433 * If this changes, a new vm_ops field can easily be added.
1434 */
1435 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1436 return true;
1437
Jason Baron909af762012-03-23 15:02:51 -07001438 /*
1439 * arch_vma_name() returns non-NULL for special architecture mappings,
1440 * such as vDSO sections.
1441 */
1442 if (arch_vma_name(vma))
1443 return true;
1444
1445 return false;
1446}
1447
/*
 * Decide what to dump of a segment, part, all or none.
 *
 * Returns the number of bytes of @vma to include in the core: 0 (skip),
 * PAGE_SIZE (just the ELF header page), or the whole vma.  Policy is
 * driven by the MMF_DUMP_* filter bits carried in @mm_flags.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	/* Explicit opt-out (madvise MADV_DONTDUMP etc.) wins next. */
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0: backing file was unlinked, treat as "anonymous" */
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/* Unreadable first word counts as "not ELF". */
		if (unlikely(get_user(word, header)))
			word = 0;
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
1536
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner string, e.g. "CORE" or "LINUX" */
	int type;		/* NT_* note type */
	unsigned int datasz;	/* payload size in bytes, before padding */
	void *data;		/* payload; only aliased here, not owned */
};
1545
1546static int notesize(struct memelfnote *en)
1547{
1548 int sz;
1549
1550 sz = sizeof(struct elf_note);
1551 sz += roundup(strlen(en->name) + 1, 4);
1552 sz += roundup(en->datasz, 4);
1553
1554 return sz;
1555}
1556
/*
 * Emit one note into the core file: the elf_note header, then the name
 * and payload, each padded to 4-byte alignment as the ELF spec
 * requires.  Returns non-zero on success, 0 on write failure (the &&
 * chain stops at the first dump_emit/dump_align that fails).
 */
static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
Roland McGrath3aba4812008-01-30 13:31:44 +01001569static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001570 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001572 memset(elf, 0, sizeof(*elf));
1573
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1575 elf->e_ident[EI_CLASS] = ELF_CLASS;
1576 elf->e_ident[EI_DATA] = ELF_DATA;
1577 elf->e_ident[EI_VERSION] = EV_CURRENT;
1578 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
1580 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001581 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001584 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 elf->e_ehsize = sizeof(struct elfhdr);
1586 elf->e_phentsize = sizeof(struct elf_phdr);
1587 elf->e_phnum = segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
1589
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001590static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
1592 phdr->p_type = PT_NOTE;
1593 phdr->p_offset = offset;
1594 phdr->p_vaddr = 0;
1595 phdr->p_paddr = 0;
1596 phdr->p_filesz = sz;
1597 phdr->p_memsz = 0;
1598 phdr->p_flags = 0;
1599 phdr->p_align = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600}
1601
1602static void fill_note(struct memelfnote *note, const char *name, int type,
1603 unsigned int sz, void *data)
1604{
1605 note->name = name;
1606 note->type = type;
1607 note->datasz = sz;
1608 note->data = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609}
1610
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; pids reported via the _vnr helpers
	 * (i.e. as seen from the task's own pid namespace). */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* Non-leader threads report only their own CPU time. */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
1648
/*
 * Fill the NT_PRPSINFO payload for task @p: command line copied from
 * the user-space argv area of @mm, pids, scheduling state, credentials
 * and the comm name.  Returns 0 on success, -EFAULT if the argv area
 * cannot be read.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* argv strings are NUL-separated; flatten to one space-separated line */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	/* pids as seen from the task's pid namespace (_vnr variants) */
	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* i = index of lowest set state bit + 1, or 0 for TASK_RUNNING */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1691
Roland McGrath3aba4812008-01-30 13:31:44 +01001692static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1693{
1694 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1695 int i = 0;
1696 do
1697 i += 2;
1698 while (auxv[i - 2] != AT_NULL);
1699 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1700}
1701
/*
 * Build the NT_SIGINFO note: convert the in-kernel siginfo of the
 * crash signal to its user-space representation in @csigdata, then
 * wrap that buffer in @note.
 */
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	copy_siginfo_to_external(csigdata, siginfo);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
1708
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001709#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1710/*
1711 * Format of NT_FILE note:
1712 *
1713 * long count -- how many files are mapped
1714 * long page_size -- units for file_ofs
1715 * array of [COUNT] elements of
1716 * long start
1717 * long end
1718 * long file_ofs
1719 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1720 */
/*
 * Build the NT_FILE note payload for current->mm (layout documented
 * above): a count, the page size, [start, end, file_ofs] triples for
 * every file-backed vma, then the NUL-terminated path strings.
 * Returns 0 on success or a negative errno; the payload buffer is
 * kvmalloc'ed and handed to @note (freed later via kvfree).
 */
static int fill_files_note(struct memelfnote *note)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = mm->map_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	/* Strings start after the header words and the triples array. */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	/*
	 * "size" can be 0 here legitimately.
	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
	 */
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;	/* anonymous mapping: not in NT_FILE */
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				/* Path didn't fit: grow 25% and restart. */
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than mm->map_count,
	 * we need to move filenames down.
	 */
	n = mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1802
Roland McGrath4206d3a2008-01-30 13:31:45 +01001803#ifdef CORE_DUMP_USE_REGSET
1804#include <linux/regset.h>
1805
1806struct elf_thread_core_info {
1807 struct elf_thread_core_info *next;
1808 struct task_struct *task;
1809 struct elf_prstatus prstatus;
1810 struct memelfnote notes[0];
1811};
1812
/* Everything fill_note_info() collects for one core dump (regset path). */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* dumped task kept at head */
	struct memelfnote psinfo;	/* NT_PRPSINFO */
	struct memelfnote signote;	/* NT_SIGINFO */
	struct memelfnote auxv;		/* NT_AUXV */
	struct memelfnote files;	/* NT_FILE; data NULL when omitted */
	user_siginfo_t csigdata;	/* backing buffer for signote */
	size_t size;			/* running total of note bytes */
	int thread_notes;		/* notes per thread (one per dumpable regset) */
};
1823
Roland McGrathd31472b2008-03-04 14:28:30 -08001824/*
1825 * When a regset has a writeback hook, we call it on each thread before
1826 * dumping user memory. On register window machines, this makes sure the
1827 * user memory backing the register data is up to date before we read it.
1828 */
1829static void do_thread_regset_writeback(struct task_struct *task,
1830 const struct user_regset *regset)
1831{
1832 if (regset->writeback)
1833 regset->writeback(task, regset, 1);
1834}
1835
H. J. Lu0953f65d2012-02-14 13:34:52 -08001836#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001837#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001838#endif
1839
1840#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001841#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001842#endif
1843
/*
 * Collect the notes for one thread: NT_PRSTATUS (regset 0, whose data
 * lands inside t->prstatus.pr_reg) plus one note for every other
 * active regset with a core_note_type.  Each note's size is added to
 * *total.  Returns 1 on success, 0 if the register fetch failed.
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	int regset0_size;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the reset in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	regset0_size = regset_get(t->task, &view->regsets[0],
		   sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
	if (regset0_size < 0)
		return 0;

	/* PRSTATUS_SIZE may shrink the note for truncated reg sets. */
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		int note_type = regset->core_note_type;
		bool is_fpreg = note_type == NT_PRFPREG;
		void *data;
		int ret;

		do_thread_regset_writeback(t->task, regset);
		if (!note_type) // not for coredumps
			continue;
		if (regset->active && regset->active(t->task, regset) <= 0)
			continue;

		/* regset_get_alloc() kmalloc's data; freed in free_note_info(). */
		ret = regset_get_alloc(t->task, regset, ~0U, &data);
		if (ret < 0)
			continue;

		if (is_fpreg)
			SET_PR_FPVALID(&t->prstatus, 1, regset0_size);

		fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX",
			  note_type, ret, data);

		*total += notesize(&t->notes[i]);
	}

	return 1;
}
1902
/*
 * Gather everything needed for the note segment of a core dump (regset
 * variant): the ELF header, one elf_thread_core_info per thread of the
 * dumping process, and the process-wide notes.  Returns 1 on success,
 * 0 on failure; partially allocated state is left in *info for the
 * caller's cleanup path (see free_note_info()).
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is best-effort: simply omitted if it can't be built. */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1997
/* Total byte size of all notes, as accumulated by fill_note_info(). */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
2002
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 * Returns 1 on success, 0 as soon as any writenote() fails.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		/* notes[0] (NT_PRSTATUS) always leads a thread's notes. */
		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		/* NT_FILE may be absent (data == NULL) if it couldn't be built. */
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		/* Skipped regsets left notes[i].data NULL; don't emit them. */
		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}
2040
2041static void free_note_info(struct elf_note_info *info)
2042{
2043 struct elf_thread_core_info *threads = info->thread;
2044 while (threads) {
2045 unsigned int i;
2046 struct elf_thread_core_info *t = threads;
2047 threads = t->next;
2048 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
2049 for (i = 1; i < info->thread_notes; ++i)
2050 kfree(t->notes[i].data);
2051 kfree(t);
2052 }
2053 kfree(info->psinfo.data);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002054 kvfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01002055}
2056
2057#else
2058
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
	struct memelfnote notes[3];	/* filled in by elf_dump_thread_status() */
	int num_notes;			/* how many of notes[] are valid */
};
2069
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 *
 * Fills @t's prstatus/fpu and notes[] for thread @t->thread and returns
 * the total on-file size of the notes generated.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* NT_PRFPREG is emitted only when the task has FP state to copy. */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}
	return sz;
}
2098
/* Non-regset variant of the per-dump note state (see the #else branch). */
struct elf_note_info {
	struct memelfnote *notes;	/* array of up to 8 process-wide notes */
	struct memelfnote *notes_files;	/* presumably aliases the NT_FILE entry
					 * in notes[] -- confirm against users */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* struct elf_thread_status entries */
	elf_fpregset_t *fpu;		/* NT_PRFPREG scratch buffer */
	user_siginfo_t csigdata;	/* NT_SIGINFO backing buffer */
	int thread_status_size;
	int numnote;
};
2110
/*
 * Allocate the fixed pieces of an elf_note_info.  Returns 1 on success,
 * 0 on allocation failure.  On failure the partially allocated members
 * are left in *info -- NOTE(review): relies on the caller running the
 * usual cleanup path to free them; confirm against the callers.
 */
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
	return 1;
}
Roland McGrath3aba4812008-01-30 13:31:44 +01002131
/*
 * Gather all non-memory information for the core dump: a per-thread
 * status entry for every other thread on the core_state dumper list,
 * then the NT_PRSTATUS/NT_PRPSINFO/NT_SIGINFO/NT_AUXV notes (and, when
 * available, NT_FILE and NT_PRFPREG) for the dumping task itself.
 * Also fills in the ELF file header.  Returns 1 on success, 0 on
 * allocation failure; the caller cleans up via free_note_info().
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/* One elf_thread_status per thread on the dumper list. */
	for (ct = current->mm->core_state->dumper.next;
					ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	/* Fill each thread's notes and accumulate their total size. */
	list_for_each_entry(ets, &info->thread_list, list) {
		int sz;

		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.  Note slots 0-3 are fixed; the
	 * NT_FILE and FPU notes below are appended only when present.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* Optional NT_FILE note; remember it so its data can be freed. */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
	return 1;
}
2194
2195static size_t get_note_info_size(struct elf_note_info *info)
2196{
2197 int sz = 0;
2198 int i;
2199
2200 for (i = 0; i < info->numnote; i++)
2201 sz += notesize(info->notes + i);
2202
2203 sz += info->thread_status_size;
2204
2205 return sz;
2206}
2207
2208static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002209 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002210{
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002211 struct elf_thread_status *ets;
Roland McGrath3aba4812008-01-30 13:31:44 +01002212 int i;
Roland McGrath3aba4812008-01-30 13:31:44 +01002213
2214 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002215 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002216 return 0;
2217
2218 /* write out the thread status notes section */
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002219 list_for_each_entry(ets, &info->thread_list, list) {
2220 for (i = 0; i < ets->num_notes; i++)
2221 if (!writenote(&ets->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002222 return 0;
2223 }
2224
2225 return 1;
2226}
2227
2228static void free_note_info(struct elf_note_info *info)
2229{
2230 while (!list_empty(&info->thread_list)) {
2231 struct list_head *tmp = info->thread_list.next;
2232 list_del(tmp);
2233 kfree(list_entry(tmp, struct elf_thread_status, list));
2234 }
2235
Dan Aloni72023652013-09-30 13:45:02 -07002236 /* Free data possibly allocated by fill_files_note(): */
2237 if (info->notes_files)
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002238 kvfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002239
Roland McGrath3aba4812008-01-30 13:31:44 +01002240 kfree(info->prstatus);
2241 kfree(info->psinfo);
2242 kfree(info->notes);
2243 kfree(info->fpu);
Roland McGrath3aba4812008-01-30 13:31:44 +01002244}
2245
Roland McGrath4206d3a2008-01-30 13:31:45 +01002246#endif
2247
Roland McGrathf47aef52007-01-26 00:56:49 -08002248static struct vm_area_struct *first_vma(struct task_struct *tsk,
2249 struct vm_area_struct *gate_vma)
2250{
2251 struct vm_area_struct *ret = tsk->mm->mmap;
2252
2253 if (ret)
2254 return ret;
2255 return gate_vma;
2256}
2257/*
2258 * Helper function for iterating across a vma list. It ensures that the caller
2259 * will visit `gate_vma' prior to terminating the search.
2260 */
2261static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2262 struct vm_area_struct *gate_vma)
2263{
2264 struct vm_area_struct *ret;
2265
2266 ret = this_vma->vm_next;
2267 if (ret)
2268 return ret;
2269 if (this_vma == gate_vma)
2270 return NULL;
2271 return gate_vma;
2272}
2273
/*
 * Extended program-header numbering: when the real phdr count exceeds
 * PN_XNUM (0xffff), e_phnum is set to PN_XNUM and the true count is
 * carried in sh_info of a single SHT_NULL section header (see the ELF
 * gABI and include/linux/elf.h).
 */
static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;		/* just the one extension record */
	elf->e_shstrndx = SHN_UNDEF;	/* no section-name string table */

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;	/* real e_shnum */
	shdr4extnum->sh_link = elf->e_shstrndx;	/* real e_shstrndx */
	shdr4extnum->sh_info = segs;		/* real e_phnum */
}
2289
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/*
	 * The number of segs is recorded into the ELF header as a 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	/* The gate vma (if any) is not on the mm's vma list; count it. */
	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto end_coredump;

	has_dumped = 1;

	/* Pass 1: lay out the file and compute every offset. */
	offset += sizeof(elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	/* Memory-dump data starts on a page boundary. */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * Zero vma process will get ZERO_SIZE_PTR here.
	 * Let coredump continue for register state at least.
	 */
	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
			      GFP_KERNEL);
	if (!vma_filesz)
		goto end_coredump;

	/* Record how many bytes of each vma will actually be written. */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	/* Extended numbering needs its single SHT_NULL section header. */
	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	/* Pass 2: actually emit the file.  Rewind offset to the data start. */
	offset = dataoff;

	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	/* Now the memory contents, in the same vma order as the phdrs. */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		if (!dump_user_range(cprm, vma->vm_start, vma_filesz[i++]))
			goto end_coredump;
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	/* kfree()/kvfree() tolerate NULL, so partial setup is fine here. */
	free_note_info(&info);
	kfree(shdr4extnum);
	kvfree(vma_filesz);
	kfree(phdr4note);
	return has_dumped;
}
2467
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002468#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
/* Register the ELF loader with the binfmt machinery at boot. */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}
2475
static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}
2481
/* core_initcall: registered early so ELF binaries can be executed during boot. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");