// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static int core_uses_pid;
static unsigned int core_pipe_limit;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

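/*
 * ->corename is a kmalloc'ed buffer that grows on demand while the
 * core_pattern template is expanded; ->used is the number of bytes
 * filled in so far and ->size the usable size of the allocation.
 */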
static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

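/*
 * Like cn_printf(), but escape the text just appended so it cannot be used
 * for directory traversal: '/' becomes '!', and "." / ".." / empty
 * components are neutralized.
 */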
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
		    (cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

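/*
 * Expand %E/%f: print the path of the executable backing current->mm,
 * either in full or only its final path component (name_only).
 */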
static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
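/*
 * For example (illustrative patterns, not defaults): a core_pattern of
 * "core.%e.%p" expands to "core.<comm>.<pid>", while a pattern beginning
 * with '|', such as "|/path/to/helper %p %s", is expanded and then split
 * on spaces into the helper's argv[].
 */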
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
						task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
						task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
						task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
						task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
						    utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
						rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}

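/*
 * Mark the whole thread group as exiting (SIGNAL_GROUP_EXIT) and queue
 * SIGKILL for every other thread that has not yet passed
 * coredump_task_exit(); returns the number of threads the dumper will
 * have to wait for.
 */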
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	/* ignore all signals except SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
			struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		signal->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

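/*
 * Stop every other thread in the group and wait until all of them have
 * entered coredump_task_exit(). Returns the number of waiting threads,
 * or a negative value if the group is already exiting or execing.
 */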
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		freezer_do_not_count();
		wait_for_completion(&core_state->startup);
		freezer_count();
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

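/*
 * Entry point for the fatal-signal path (see get_signal()): expand
 * core_pattern and either write the dump to a regular file or feed it to
 * a user-mode helper through a pipe.
 */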
void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct user_namespace *mnt_userns;
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(&root, cn.corename,
						   open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		mnt_userns = file_mnt_user_ns(cprm.file);
		if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
			    current_fsuid())) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
					    cn.corename);
			goto close_fail;
		}
		if ((inode->i_mode & 0677) != 0600) {
			pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
					    cn.corename);
			goto close_fail;
		}
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
				0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	retval = unshare_files();
	if (retval)
		goto close_fail;
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		/*
		 * Ensures that file size is big enough to contain the current
		 * file position. This prevents gdb from complaining about
		 * a truncated file if the last "write" to the file was
		 * dump_skip.
		 */
		if (cprm.to_skip) {
			cprm.to_skip--;
			dump_emit(&cprm, "", 1);
		}
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;
	if (cprm->written + nr > cprm->limit)
		return 0;

	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return __dump_emit(cprm, zeroes, nr);
	}
}

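/*
 * dump_emit() first flushes any skip recorded by dump_skip()/dump_skip_to()
 * (as an lseek on seekable files, or zero-fill otherwise) and then writes
 * the buffer, so skipped regions become holes in the core file.
 */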
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
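/*
 * Write the user address range [start, start + len) to the core file one
 * page at a time; pages without a backing page table entry are recorded
 * as skips so the resulting file stays sparse.
 */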
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;
		int stop;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			void *kaddr = kmap_local_page(page);

			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
			kunmap_local(kaddr);
			put_page(page);
			if (stop)
				return 0;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}
	}
	return 1;
}
#endif

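/*
 * Record a pending skip so that the next emitted data starts at the given
 * power-of-two alignment; returns 0 if @align is not a power of two.
 */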
int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);
	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);

#ifdef CONFIG_SYSCTL

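/*
 * Warn when fs.suid_dumpable=2 is combined with a core_pattern that is
 * neither an absolute path nor a pipe; do_coredump() refuses such dumps,
 * so this combination silently loses cores.
 */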
void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|') {
		pr_warn(
"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
"Pipe handler or fully qualified core dump path required.\n"
"Set kernel.core_pattern before fs.suid_dumpable.\n"
		);
	}
}

static int proc_dostring_coredump(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error = proc_dostring(table, write, buffer, lenp, ppos);

	if (!error)
		validate_coredump_safety();
	return error;
}

static struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
			FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) &&
	    (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
		return PAGE_SIZE;

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the task's
 * VMAs.
 */
int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
		      struct core_vma_metadata **vma_meta,
		      size_t *vma_data_size_ptr)
{
	struct vm_area_struct *vma, *gate_vma;
	struct mm_struct *mm = current->mm;
	int i;
	size_t vma_data_size = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	gate_vma = get_gate_vma(mm);
	*vma_count = mm->map_count + (gate_vma ? 1 : 0);

	*vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
	if (!*vma_meta) {
		mmap_write_unlock(mm);
		return -ENOMEM;
	}

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma), i++) {
		struct core_vma_metadata *m = (*vma_meta) + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);

		vma_data_size += m->dump_size;
	}

	mmap_write_unlock(mm);

	if (WARN_ON(i != *vma_count)) {
		kvfree(*vma_meta);
		return -EFAULT;
	}

	*vma_data_size_ptr = vma_data_size;
	return 0;
}