blob: 13f25e241ac46cbd2f5ffa23de45e60a035a0c1a [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * linux/fs/binfmt_elf.c
4 *
5 * These are the functions used to load ELF format executables as used
6 * on SVr4 machines. Information on the format may be found in the book
7 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
8 * Tools".
9 *
10 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/mm.h>
17#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/errno.h>
19#include <linux/signal.h>
20#include <linux/binfmts.h>
21#include <linux/string.h>
22#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/personality.h>
25#include <linux/elfcore.h>
26#include <linux/init.h>
27#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/compiler.h>
29#include <linux/highmem.h>
Anshuman Khandual03911132020-04-06 20:03:51 -070030#include <linux/hugetlb.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070032#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070035#include <linux/elf.h>
Kees Cookd1fd8362015-04-14 15:48:07 -070036#include <linux/elf-randomize.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070037#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080038#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010039#include <linux/sched.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +010040#include <linux/sched/coredump.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010041#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010042#include <linux/sched/cputime.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +010043#include <linux/cred.h>
Ross Zwisler50378352015-10-05 16:33:36 -060044#include <linux/dax.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080045#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <asm/param.h>
47#include <asm/page.h>
48
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070049#ifndef user_long_t
50#define user_long_t long
51#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070052#ifndef user_siginfo_t
53#define user_siginfo_t siginfo_t
54#endif
55
Nicolas Pitre47552002017-08-16 16:05:13 -040056/* That's for binfmt_elf_fdpic to deal with */
57#ifndef elf_check_fdpic
58#define elf_check_fdpic(ex) false
59#endif
60
Al Viro71613c32012-10-20 22:00:48 -040061static int load_elf_binary(struct linux_binprm *bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Josh Triplett69369a72014-04-03 14:48:27 -070063#ifdef CONFIG_USELIB
64static int load_elf_library(struct file *);
65#else
66#define load_elf_library NULL
67#endif
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/*
70 * If we don't support core dumping, then supply a NULL so we
71 * don't even try.
72 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080073#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080074static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075#else
76#define elf_core_dump NULL
77#endif
78
79#if ELF_EXEC_PAGESIZE > PAGE_SIZE
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070080#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070081#else
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070082#define ELF_MIN_ALIGN PAGE_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#endif
84
85#ifndef ELF_CORE_EFLAGS
86#define ELF_CORE_EFLAGS 0
87#endif
88
89#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
90#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
91#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
92
/*
 * Registration record for the ELF loader, handed to the binfmt core.
 * load_binary/load_shlib/core_dump may be NULL stubs depending on
 * CONFIG_USELIB / CONFIG_ELF_CORE (see the #ifdef blocks above).
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	/* A core file smaller than one ELF page cannot hold valid headers. */
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
100
Alexey Dobriyan18676ff2020-01-30 22:17:01 -0800101#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800103static int set_brk(unsigned long start, unsigned long end, int prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
105 start = ELF_PAGEALIGN(start);
106 end = ELF_PAGEALIGN(end);
107 if (end > start) {
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800108 /*
109 * Map the last of the bss segment.
110 * If the header is requesting these pages to be
111 * executable, honour that (ppc32 needs this).
112 */
113 int error = vm_brk_flags(start, end - start,
114 prot & PROT_EXEC ? VM_EXEC : 0);
Linus Torvalds5d22fc22016-05-27 15:57:31 -0700115 if (error)
116 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117 }
118 current->mm->start_brk = current->mm->brk = end;
119 return 0;
120}
121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122/* We need to explicitly zero any fractional pages
123 after the data section (i.e. bss). This would
124 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700125 be in memory
126 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127static int padzero(unsigned long elf_bss)
128{
129 unsigned long nbyte;
130
131 nbyte = ELF_PAGEOFFSET(elf_bss);
132 if (nbyte) {
133 nbyte = ELF_MIN_ALIGN - nbyte;
134 if (clear_user((void __user *) elf_bss, nbyte))
135 return -EFAULT;
136 }
137 return 0;
138}
139
Ohad Ben-Cohen09c6dd32008-02-03 18:05:15 +0200140/* Let's use some macros to make this stack manipulation a little clearer */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141#ifdef CONFIG_STACK_GROWSUP
142#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
143#define STACK_ROUND(sp, items) \
144 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700145#define STACK_ALLOC(sp, len) ({ \
146 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
147 old_sp; })
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148#else
149#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
150#define STACK_ROUND(sp, items) \
151 (((unsigned long) (sp - items)) &~ 15UL)
152#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
153#endif
154
Nathan Lynch483fad12008-07-22 04:48:46 +1000155#ifndef ELF_BASE_PLATFORM
156/*
157 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
158 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
159 * will be copied to the user stack in the same manner as AT_PLATFORM.
160 */
161#define ELF_BASE_PLATFORM NULL
162#endif
163
/*
 * create_elf_tables() - build the initial user stack contents.
 *
 * Lays out, from high to low addresses: the platform strings, the
 * AT_RANDOM seed bytes, then (after rounding) argc, the argv pointer
 * array, the envp pointer array, and finally the auxiliary vector.
 * The auxv is first assembled in mm->saved_auxv (so /proc can expose it
 * later) and copied out in one go at the end.
 *
 * @bprm:             binprm with the already-copied arg/env strings; bprm->p
 *                    is the current stack top and is updated here.
 * @exec:             ELF header of the binary (for AT_PHDR/AT_PHENT/AT_PHNUM).
 * @load_addr:        load address of the binary, used to locate its phdrs.
 * @interp_load_addr: base of the interpreter mapping, exported as AT_BASE.
 * @e_entry:          program entry point, exported as AT_ENTRY.
 *
 * Returns 0 on success, -EFAULT on a failed user-space write, -EINVAL
 * on a malformed argument/environment string.
 */
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr,
		unsigned long e_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	/* UIDs/GIDs are translated into the current user namespace. */
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	/* Number of elf_addr_t slots the auxv occupies on the stack. */
	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	/* argc + argv pointers + NULL + envp pointers + NULL. */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		/* Strings were copied in earlier; walk them by length. */
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
349
James Hoganc07380b2011-05-09 10:58:40 +0100350#ifndef elf_map
351
/*
 * Map one PT_LOAD segment of @filep at (page-aligned) @addr.
 *
 * When @total_size is non-zero this is the first mapping of an image:
 * the full image extent is mapped so address-space randomization cannot
 * place later segments on top of it, then the surplus beyond this
 * segment is unmapped again (needed for images with holes).  Returns
 * the mapped address, or an error value for which BAD_ADDR() is true.
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	/* Extend size/offset so the file mapping starts page-aligned. */
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	/* Diagnose MAP_FIXED_NOREPLACE collisions instead of failing silently. */
	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}
390
James Hoganc07380b2011-05-09 10:58:40 +0100391#endif /* !elf_map */
392
Alexey Dobriyan49ac9812019-03-07 16:29:03 -0800393static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
Jiri Kosinacc503c12008-01-30 13:31:07 +0100394{
395 int i, first_idx = -1, last_idx = -1;
396
397 for (i = 0; i < nr; i++) {
398 if (cmds[i].p_type == PT_LOAD) {
399 last_idx = i;
400 if (first_idx == -1)
401 first_idx = i;
402 }
403 }
404 if (first_idx == -1)
405 return 0;
406
407 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
408 ELF_PAGESTART(cmds[first_idx].p_vaddr);
409}
410
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800411static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
412{
413 ssize_t rv;
414
415 rv = kernel_read(file, buf, len, &pos);
416 if (unlikely(rv != len)) {
417 return (rv < 0) ? rv : -EIO;
418 }
419 return 0;
420}
421
Paul Burton6a8d3892014-09-11 08:30:14 +0100422/**
423 * load_elf_phdrs() - load ELF program headers
424 * @elf_ex: ELF header of the binary whose program headers should be loaded
425 * @elf_file: the opened ELF binary file
426 *
427 * Loads ELF program headers from the binary file elf_file, which has the ELF
428 * header pointed to by elf_ex, into a newly allocated array. The caller is
429 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
430 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;	/* non-zero err means free and return NULL */
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	/*
	 * Cap the table at 64KiB (and at most one ELF page) so a hostile
	 * e_phnum cannot force a huge allocation; also rejects e_phnum == 0.
	 */
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
	if (retval < 0) {
		err = retval;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100471
Paul Burton774c1052014-09-11 08:30:16 +0100472#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
473
/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

/* Initializer matching the empty dummy state above. */
#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
516
/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
539
540#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541
/*
 * Translate ELF program-header permission flags (PF_R/PF_W/PF_X)
 * into the corresponding mmap protection bits.
 */
static inline int make_prot(u32 p_flags)
{
	return ((p_flags & PF_R) ? PROT_READ  : 0) |
	       ((p_flags & PF_W) ? PROT_WRITE : 0) |
	       ((p_flags & PF_X) ? PROT_EXEC  : 0);
}
554
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555/* This is much more generalized than the library routine read function,
556 so we keep this separate. Technically the library read function
557 is only provided so that we can read a.out libraries that have
558 an ELF header */
559
/*
 * Map the ELF interpreter (dynamic linker) described by @interp_elf_ex
 * and @interp_elf_phdata into the current address space, then allocate
 * and zero its bss.  @no_base, when non-zero for an ET_DYN interpreter,
 * biases the first mapping so vaddr 0 lands at a kernel-chosen address.
 * Returns the load bias on success, or an error value for which
 * BAD_ADDR() is true (initialized to ~0UL for the early sanity-check
 * failures).
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			/*
			 * After the first segment is placed (or for ET_EXEC),
			 * later segments must land exactly where the phdrs say.
			 */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			/* total_size is only passed for the first mapping. */
			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
679
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680/*
681 * These are the functions used to load ELF style executables and shared
682 * libraries. There is no binary dependent code anywhere else.
683 */
684
/*
 * load_elf_binary - load an ELF executable (and its interpreter, if any)
 * @bprm: binary parameter block; bprm->buf already contains the first
 *        bytes of the file and is reinterpreted here as the ELF header.
 *
 * Returns 0 on success, after the new image has been set up and
 * start_thread() has pointed the registers at its entry point; returns a
 * negative errno on failure.  Note the ordering constraint: once
 * flush_old_exec() succeeds the old mm is gone, so later failures can
 * only be reported back to the exec core via the cleanup labels below.
 */
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	/* Read the full program header table into a kmalloc'd buffer. */
	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	/*
	 * First scan of the program headers: look for a PT_INTERP entry
	 * and, if found, open the named interpreter and read its ELF
	 * header.  At most one PT_INTERP is honoured (we break after it).
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		/* p_filesz includes the terminating NUL, so >= 2 for "x\0" */
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_ph;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	/*
	 * Second scan: record the PT_GNU_STACK request for stack
	 * executability and hand arch-specific headers to the arch code.
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Flush all traces of the currently running executable.
	 * POINT OF NO RETURN: from here on a failure cannot restore
	 * the old image, only be reported to the exec core.
	 */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex->e_type == ET_DYN) {
				/*
				 * Fold the actual placement chosen by the
				 * first mapping back into load_bias so all
				 * later segments land relative to it.
				 */
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			/* remember the protection of the highest segment
			 * so the anonymous bss tail gets the same one */
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	/* Translate file-relative addresses into actual virtual addresses. */
	e_entry = elf_ex->e_entry + load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);

		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	/* Build argv/envp/auxv on the new stack. */
	retval = create_elf_tables(bprm, elf_ex,
			  load_addr, interp_load_addr, e_entry);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
1170
Josh Triplett69369a72014-04-03 14:48:27 -07001171#ifdef CONFIG_USELIB
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172/* This is really simpleminded and specialized - we are loading an
1173 a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;	/* ELF header read from the library file */

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	/* Exactly one PT_LOAD segment is accepted for a library. */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	/* Advance to that single PT_LOAD entry. */
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page (start of bss). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	/*
	 * If p_memsz extends past p_filesz, map the remaining bss pages
	 * anonymously via vm_brk().
	 */
	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
Josh Triplett69369a72014-04-03 14:48:27 -07001254#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001256#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257/*
1258 * ELF core dumper
1259 *
1260 * Modelled on fs/exec.c:aout_core_dump()
1261 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1262 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
1264/*
Jason Baron909af762012-03-23 15:02:51 -07001265 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1266 * that are useful for post-mortem analysis are included in every core dump.
1267 * In that way we ensure that the core dump is fully interpretable later
1268 * without matching up the same kernel and hardware config to see what PC values
1269 * meant. These special mappings include - vDSO, vsyscall, and other
1270 * architecture specific mappings
1271 */
1272static bool always_dump_vma(struct vm_area_struct *vma)
1273{
1274 /* Any vsyscall mappings? */
1275 if (vma == get_gate_vma(vma->vm_mm))
1276 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001277
1278 /*
1279 * Assume that all vmas with a .name op should always be dumped.
1280 * If this changes, a new vm_ops field can easily be added.
1281 */
1282 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1283 return true;
1284
Jason Baron909af762012-03-23 15:02:51 -07001285 /*
1286 * arch_vma_name() returns non-NULL for special architecture mappings,
1287 * such as vDSO sections.
1288 */
1289 if (arch_vma_name(vma))
1290 return true;
1291
1292 return false;
1293}
1294
1295/*
Roland McGrath82df3972007-10-16 23:27:02 -07001296 * Decide what to dump of a segment, part, all or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 */
/*
 * vma_dump_size - decide how much of @vma to include in the core dump.
 * @mm_flags: the mm's dump-filter flags; FILTER(type) below tests the
 *            corresponding MMF_DUMP_* bit.
 *
 * Returns 0 (dump nothing), PAGE_SIZE (dump only the first page, used to
 * capture an ELF header), or the full span vma->vm_end - vma->vm_start.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	/* Explicitly marked "don't dump" (e.g. via madvise). */
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0 means the backing file was unlinked, so the
		 * mapping is treated as anonymous shared memory. */
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;	/* unreadable: treat as "no magic" */
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
1390
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note name, e.g. "CORE" or "LINUX" */
	int type;		/* note type, e.g. NT_PRSTATUS, NT_AUXV */
	unsigned int datasz;	/* payload size in bytes (unpadded) */
	void *data;		/* payload bytes; freeing policy is the caller's */
};
1399
1400static int notesize(struct memelfnote *en)
1401{
1402 int sz;
1403
1404 sz = sizeof(struct elf_note);
1405 sz += roundup(strlen(en->name) + 1, 4);
1406 sz += roundup(en->datasz, 4);
1407
1408 return sz;
1409}
1410
Al Viroecc8c772013-10-05 15:32:35 -04001411static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412{
1413 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 en.n_namesz = strlen(men->name) + 1;
1415 en.n_descsz = men->datasz;
1416 en.n_type = men->type;
1417
Al Viroecc8c772013-10-05 15:32:35 -04001418 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001419 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1420 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
/*
 * Initialize the ELF header for an ET_CORE file: identification bytes,
 * class/endianness/ABI from the arch, caller-supplied machine and flags,
 * and the program-header table layout (phdrs start right after the
 * ELF header; section headers are not used, so those fields stay 0
 * from the memset).
 */
static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);	/* phdrs follow immediately */
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}
1443
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001444static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
1446 phdr->p_type = PT_NOTE;
1447 phdr->p_offset = offset;
1448 phdr->p_vaddr = 0;
1449 phdr->p_paddr = 0;
1450 phdr->p_filesz = sz;
1451 phdr->p_memsz = 0;
1452 phdr->p_flags = 0;
1453 phdr->p_align = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454}
1455
1456static void fill_note(struct memelfnote *note, const char *name, int type,
1457 unsigned int sz, void *data)
1458{
1459 note->name = name;
1460 note->type = type;
1461 note->datasz = sz;
1462 note->data = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463}
1464
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; hold the read lock while chasing it */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* non-leader threads report only their own CPU time */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	/* cumulative times of reaped children, from the signal struct */
	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
1502
/*
 * Fill the NT_PRPSINFO payload: command line (read from the dumping
 * process's own user memory), ids, scheduling state and credentials.
 * Returns 0 on success or -EFAULT if the argv area cannot be read.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;	/* leave room for the trailing NUL */
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* argv strings are NUL-separated; turn the separators into spaces */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	/* real_parent is RCU-protected */
	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* map the task state bitmask to an index, then to a state letter */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	/* translate ids into the dumper's user namespace */
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	/* pr_fname need not be NUL-terminated if comm fills it exactly */
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1545
Roland McGrath3aba4812008-01-30 13:31:44 +01001546static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1547{
1548 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1549 int i = 0;
1550 do
1551 i += 2;
1552 while (auxv[i - 2] != AT_NULL);
1553 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1554}
1555
/*
 * Convert the fatal signal's siginfo to the user-visible layout and wrap
 * it in an NT_SIGINFO note.  copy_siginfo_to_user() expects a user
 * pointer, so temporarily switch the address limit to KERNEL_DS to let
 * it write into the kernel buffer csigdata.
 */
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
1565
#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
static int fill_files_note(struct memelfnote *note)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = mm->map_count;
	if (count > UINT_MAX / 64)	/* avoid overflow in count * 64 below */
		return -EINVAL;
	size = count * 64;		/* rough guess: 64 bytes per mapping */

	/* the start/end/offset triples live before the filename area */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	/*
	 * "size" can be 0 here legitimately.
	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
	 */
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;	/* skip the count/page_size header */
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;	/* anonymous mapping: no NT_FILE entry */
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				/* buffer too small: grow by 25% and retry */
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;	/* unresolvable path: skip this vma */
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than mm->map_count,
	 * we need to move filenames down.
	 */
	n = mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	/* note takes ownership of "data"; freed via free_note_info() */
	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1659
Roland McGrath4206d3a2008-01-30 13:31:45 +01001660#ifdef CORE_DUMP_USE_REGSET
1661#include <linux/regset.h>
1662
1663struct elf_thread_core_info {
1664 struct elf_thread_core_info *next;
1665 struct task_struct *task;
1666 struct elf_prstatus prstatus;
1667 struct memelfnote notes[0];
1668};
1669
/* Aggregated note state for a regset-based core dump. */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* dumping task first, then others */
	struct memelfnote psinfo;	/* NT_PRPSINFO (process-wide) */
	struct memelfnote signote;	/* NT_SIGINFO (fatal signal) */
	struct memelfnote auxv;		/* NT_AUXV (auxiliary vector) */
	struct memelfnote files;	/* NT_FILE (mapped files), may be empty */
	user_siginfo_t csigdata;	/* backing storage for signote */
	size_t size;			/* running total of all note sizes */
	int thread_notes;		/* number of per-thread note slots */
};
1680
Roland McGrathd31472b2008-03-04 14:28:30 -08001681/*
1682 * When a regset has a writeback hook, we call it on each thread before
1683 * dumping user memory. On register window machines, this makes sure the
1684 * user memory backing the register data is up to date before we read it.
1685 */
1686static void do_thread_regset_writeback(struct task_struct *task,
1687 const struct user_regset *regset)
1688{
1689 if (regset->writeback)
1690 regset->writeback(task, regset, 1);
1691}
1692
H. J. Lu0953f65d2012-02-14 13:34:52 -08001693#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001694#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001695#endif
1696
1697#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001698#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001699#endif
1700
/*
 * Collect all regset notes for one thread into t->notes[], adding each
 * note's on-disk size to *total.  Returns 1 on success, 0 on allocation
 * failure.
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
				    &t->prstatus.pr_reg, NULL);

	/* note 0 points at the embedded prstatus; it is never kfree'd */
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset) > 0)) {
			int ret;
			size_t size = regset_size(t->task, regset);
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				/* on success the note owns "data" (freed in free_note_info) */
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					/* FP regs present: flag it in prstatus */
					SET_PR_FPVALID(&t->prstatus,
						       1, regset0_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
1761
/*
 * Regset-based fill_note_info(): build the ELF header and every note
 * (per-thread regsets plus the process-wide psinfo/siginfo/auxv/files
 * notes), accumulating the total size in info->size.  Returns 1 on
 * success, 0 on failure; free_note_info() cleans up either way.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is optional; on failure the note is simply omitted */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1856
/* Total on-disk size of all notes accumulated by fill_note_info(). */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
1861
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 * Returns 1 on success, 0 if any writenote() failed.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		/* per-thread NT_PRSTATUS always comes first */
		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		/* NT_FILE may be absent (data == NULL) and is then skipped */
		if (first && info->files.data &&
				!writenote(&info->files, cprm))
			return 0;

		/* remaining regset notes; all-zero slots were never filled */
		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}
1899
1900static void free_note_info(struct elf_note_info *info)
1901{
1902 struct elf_thread_core_info *threads = info->thread;
1903 while (threads) {
1904 unsigned int i;
1905 struct elf_thread_core_info *t = threads;
1906 threads = t->next;
1907 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1908 for (i = 1; i < info->thread_notes; ++i)
1909 kfree(t->notes[i].data);
1910 kfree(t);
1911 }
1912 kfree(info->psinfo.data);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001913 kvfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001914}
1915
1916#else
1917
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* linked into elf_note_info.thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* the thread this entry describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* prstatus, fpu, optional xfpu */
	int num_notes;			/* how many of notes[] are valid */
};
1931
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 *
 * Fills t->notes[] from the thread's registers and FP state and returns
 * the combined on-disk size of the notes produced.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* FP note only when the arch reports FP state for this thread */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	/* extended FP state, where the arch provides it */
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
1969
/* Aggregated note state for the non-regset (legacy) core-dump path. */
struct elf_note_info {
	struct memelfnote *notes;	/* array of up to 8 process-wide notes */
	struct memelfnote *notes_files;	/* points into notes[] at NT_FILE, or NULL */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* elf_thread_status entries, other threads */
	elf_fpregset_t *fpu;		/* NT_PRFPREG backing storage */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;		/* extended FP backing storage */
#endif
	user_siginfo_t csigdata;	/* NT_SIGINFO backing storage */
	int thread_status_size;		/* total size of other threads' notes */
	int numnote;			/* how many of notes[] are in use */
};
1984
/*
 * Zero the note-info state and allocate the fixed buffers it needs.
 * Returns 1 on success, 0 on any allocation failure; partially
 * allocated state is released later by free_note_info().
 */
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes (8 covers every note this path emits) */
	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
Roland McGrath3aba4812008-01-30 13:31:44 +01002010
/*
 * Non-regset fill_note_info(): allocate a status record per secondary
 * thread, dump their register/FP state, then build the ELF header and
 * the process-wide notes for the current (dumping) thread.  Returns 1
 * on success, 0 on failure; free_note_info() cleans up either way.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/* dumper.next skips the current thread; it is handled below */
	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each_entry(ets, &info->thread_list, list) {
		int sz;

		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* NT_FILE is optional; remember its slot so it can be freed later */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
2080
2081static size_t get_note_info_size(struct elf_note_info *info)
2082{
2083 int sz = 0;
2084 int i;
2085
2086 for (i = 0; i < info->numnote; i++)
2087 sz += notesize(info->notes + i);
2088
2089 sz += info->thread_status_size;
2090
2091 return sz;
2092}
2093
2094static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002095 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002096{
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002097 struct elf_thread_status *ets;
Roland McGrath3aba4812008-01-30 13:31:44 +01002098 int i;
Roland McGrath3aba4812008-01-30 13:31:44 +01002099
2100 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002101 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002102 return 0;
2103
2104 /* write out the thread status notes section */
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002105 list_for_each_entry(ets, &info->thread_list, list) {
2106 for (i = 0; i < ets->num_notes; i++)
2107 if (!writenote(&ets->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002108 return 0;
2109 }
2110
2111 return 1;
2112}
2113
2114static void free_note_info(struct elf_note_info *info)
2115{
2116 while (!list_empty(&info->thread_list)) {
2117 struct list_head *tmp = info->thread_list.next;
2118 list_del(tmp);
2119 kfree(list_entry(tmp, struct elf_thread_status, list));
2120 }
2121
Dan Aloni72023652013-09-30 13:45:02 -07002122 /* Free data possibly allocated by fill_files_note(): */
2123 if (info->notes_files)
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002124 kvfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002125
Roland McGrath3aba4812008-01-30 13:31:44 +01002126 kfree(info->prstatus);
2127 kfree(info->psinfo);
2128 kfree(info->notes);
2129 kfree(info->fpu);
2130#ifdef ELF_CORE_COPY_XFPREGS
2131 kfree(info->xfpu);
2132#endif
2133}
2134
Roland McGrath4206d3a2008-01-30 13:31:45 +01002135#endif
2136
Roland McGrathf47aef52007-01-26 00:56:49 -08002137static struct vm_area_struct *first_vma(struct task_struct *tsk,
2138 struct vm_area_struct *gate_vma)
2139{
2140 struct vm_area_struct *ret = tsk->mm->mmap;
2141
2142 if (ret)
2143 return ret;
2144 return gate_vma;
2145}
2146/*
2147 * Helper function for iterating across a vma list. It ensures that the caller
2148 * will visit `gate_vma' prior to terminating the search.
2149 */
2150static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2151 struct vm_area_struct *gate_vma)
2152{
2153 struct vm_area_struct *ret;
2154
2155 ret = this_vma->vm_next;
2156 if (ret)
2157 return ret;
2158 if (this_vma == gate_vma)
2159 return NULL;
2160 return gate_vma;
2161}
2162
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002163static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2164 elf_addr_t e_shoff, int segs)
2165{
2166 elf->e_shoff = e_shoff;
2167 elf->e_shentsize = sizeof(*shdr4extnum);
2168 elf->e_shnum = 1;
2169 elf->e_shstrndx = SHN_UNDEF;
2170
2171 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2172
2173 shdr4extnum->sh_type = SHT_NULL;
2174 shdr4extnum->sh_size = elf->e_shnum;
2175 shdr4extnum->sh_link = elf->e_shstrndx;
2176 shdr4extnum->sh_info = segs;
2177}
2178
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 *
 * Layout of the resulting file: ELF header, program headers (one PT_NOTE
 * plus one PT_LOAD per vma, plus arch extras), the note data, then the
 * page-aligned memory contents of each vma.
 *
 * Returns nonzero once anything has been dumped (has_dumped), which is
 * what the caller reports; write failures fall through to cleanup.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/*
	 * The number of segs are recorded into ELF header as 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	/* gate_vma (if the arch has one) is dumped as an extra segment. */
	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	/* dump_emit() writes from kernel buffers; widen the addr limit. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	/* --- Pass 1: compute file offsets for every piece. --- */
	offset += sizeof(elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	/* Memory contents start at the next page boundary after the notes. */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * Zero vma process will get ZERO_SIZE_PTR here.
	 * Let coredump continue for register state at least.
	 */
	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
			      GFP_KERNEL);
	if (!vma_filesz)
		goto end_coredump;

	/* Record each vma's dump size now so pass 2 sees a stable layout. */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	/* Extended numbering needed: real counts go in a SHT_NULL shdr. */
	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	/* --- Pass 2: actually emit the file. --- */
	offset = dataoff;

	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		/* Translate VM_* protection bits into ELF PF_* flags. */
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	/*
	 * Dump the memory contents, page by page, in the same vma order
	 * used when the PT_LOAD headers were emitted above.
	 */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			/* NULL means "not resident": emit a hole instead. */
			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kvfree(vma_filesz);
	kfree(phdr4note);
	return has_dumped;
}
2381
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002382#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
/*
 * Register the ELF binfmt handler so the kernel can execute ELF images.
 */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}
2389
static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}
2395
/* Hook the loader into the binfmt machinery at boot / module load. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");