// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

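/*
 * NAMED_ARRAY_INDEX expands to a designated initializer, e.g.
 * [MM_FILEPAGES] = "MM_FILEPAGES", which keeps the strings below in
 * sync with the enum values used to index them.
 */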
#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

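		/*
		 * Atomically pop a cached stack out of this CPU's slot;
		 * a NULL result means the slot was already empty.
		 */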
		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Mark stack accessible for KASAN. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		tsk->stack = s->addr;
		return s->addr;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack) {
		tsk->stack_vm_area = find_vm_area(stack);
		tsk->stack = stack;
	}
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return tsk->stack;
	}
	return NULL;
#endif
}

static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);

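		/*
		 * Try to park the stack in an empty per-CPU cache slot so a
		 * future thread can reuse it without a fresh vmalloc(); the
		 * cmpxchg only succeeds if the slot is still NULL.
		 */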
		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
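	/*
	 * Reset any KASAN pointer tag so the stack is addressed through a
	 * default-tagged pointer.
	 */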
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		*new = data_race(*orig);
		INIT_LIST_HEAD(&new->anon_vma_chain);
		new->vm_next = new->vm_prev = NULL;
		dup_vma_anon_name(orig, new);
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	free_vma_anon_name(vma);
	kmem_cache_free(vm_area_cachep, vma);
}

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

static int memcg_charge_kernel_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);
	int ret;

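	/*
	 * Kernel stacks are accounted in KiB units (NR_KERNEL_STACK_KB),
	 * so PAGE_SIZE must be a multiple of 1024 for that math to be exact.
	 */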
	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			/*
			 * If memcg_kmem_charge_page() fails, page's
			 * memory cgroup pointer is NULL, and
			 * memcg_kmem_uncharge_page() in free_thread_stack()
			 * will ignore this page.
			 */
			ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
						     0);
			if (ret)
				return ret;
		}
	}
#endif
	return 0;
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	bpf_task_storage_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
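	/*
	 * Concretely, the formula below caps kernel stacks at one eighth
	 * of memory: e.g. 16 GiB of RAM with 16 KiB stacks allows at most
	 * 16 GiB / (16 KiB * 8) = 131072 threads, before the clamp below.
	 */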
Arun KS | 3d6357d | 2018-12-28 00:34:20 -0800 | [diff] [blame] | 778 | if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | threads = MAX_THREADS; |
| 780 | else |
Arun KS | 3d6357d | 2018-12-28 00:34:20 -0800 | [diff] [blame] | 781 | threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 782 | (u64) THREAD_SIZE * 8UL); |
| 783 | |
| 784 | if (threads > max_threads_suggested) |
| 785 | threads = max_threads_suggested; |
| 786 | |
| 787 | max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); |
| 788 | } |
| 789 | |
| 790 | #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT |
| 791 | /* Initialized by the architecture: */ |
| 792 | int arch_task_struct_size __read_mostly; |
| 793 | #endif |
| 794 | |
Christoph Hellwig | 4189ff2 | 2019-08-12 08:55:24 +0200 | [diff] [blame] | 795 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR |
Kees Cook | 5905429 | 2017-08-16 13:00:58 -0700 | [diff] [blame] | 796 | static void task_struct_whitelist(unsigned long *offset, unsigned long *size) |
| 797 | { |
| 798 | /* Fetch thread_struct whitelist for the architecture. */ |
| 799 | arch_thread_struct_whitelist(offset, size); |
| 800 | |
| 801 | /* |
| 802 | * Handle zero-sized whitelist or empty thread_struct, otherwise |
| 803 | * adjust offset to position of thread_struct in task_struct. |
| 804 | */ |
| 805 | if (unlikely(*size == 0)) |
| 806 | *offset = 0; |
| 807 | else |
| 808 | *offset += offsetof(struct task_struct, thread); |
| 809 | } |
Christoph Hellwig | 4189ff2 | 2019-08-12 08:55:24 +0200 | [diff] [blame] | 810 | #endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */ |
Kees Cook | 5905429 | 2017-08-16 13:00:58 -0700 | [diff] [blame] | 811 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 812 | void __init fork_init(void) |
| 813 | { |
| 814 | int i; |
| 815 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR |
| 816 | #ifndef ARCH_MIN_TASKALIGN |
| 817 | #define ARCH_MIN_TASKALIGN 0 |
| 818 | #endif |
| 819 | int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); |
Kees Cook | 5905429 | 2017-08-16 13:00:58 -0700 | [diff] [blame] | 820 | unsigned long useroffset, usersize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 821 | |
| 822 | /* create a slab on which task_structs can be allocated */ |
Kees Cook | 5905429 | 2017-08-16 13:00:58 -0700 | [diff] [blame] | 823 | task_struct_whitelist(&useroffset, &usersize); |
| 824 | task_struct_cachep = kmem_cache_create_usercopy("task_struct", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 825 | arch_task_struct_size, align, |
Kees Cook | 5905429 | 2017-08-16 13:00:58 -0700 | [diff] [blame] | 826 | SLAB_PANIC|SLAB_ACCOUNT, |
| 827 | useroffset, usersize, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 828 | #endif |
| 829 | |
| 830 | /* do the arch specific task caches init */ |
| 831 | arch_task_cache_init(); |
| 832 | |
| 833 | set_max_threads(MAX_THREADS); |
| 834 | |
| 835 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; |
| 836 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; |
| 837 | init_task.signal->rlim[RLIMIT_SIGPENDING] = |
| 838 | init_task.signal->rlim[RLIMIT_NPROC]; |
| 839 | |
Alexey Gladkov | 21d1c5e | 2021-04-22 14:27:11 +0200 | [diff] [blame] | 840 | for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 841 | init_user_ns.ucount_max[i] = max_threads/2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | |
Eric W. Biederman | 5ddf994 | 2021-08-23 11:12:17 -0500 | [diff] [blame] | 843 | set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); |
| 844 | set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); |
| 845 | set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); |
| 846 | set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); |
Alexey Gladkov | 21d1c5e | 2021-04-22 14:27:11 +0200 | [diff] [blame] | 847 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | #ifdef CONFIG_VMAP_STACK |
| 849 | cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", |
| 850 | NULL, free_vm_stack_cache); |
| 851 | #endif |
| 852 | |
Sami Tolvanen | d08b9f0 | 2020-04-27 09:00:07 -0700 | [diff] [blame] | 853 | scs_init(); |
| 854 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 | lockdep_init_task(&init_task); |
Nadav Amit | aad42dd | 2019-04-26 16:22:44 -0700 | [diff] [blame] | 856 | uprobes_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 857 | } |
| 858 | |
| 859 | int __weak arch_dup_task_struct(struct task_struct *dst, |
| 860 | struct task_struct *src) |
| 861 | { |
| 862 | *dst = *src; |
| 863 | return 0; |
| 864 | } |
| 865 | |
| 866 | void set_task_stack_end_magic(struct task_struct *tsk) |
| 867 | { |
| 868 | unsigned long *stackend; |
| 869 | |
| 870 | stackend = end_of_stack(tsk); |
| 871 | *stackend = STACK_END_MAGIC; /* for overflow detection */ |
| 872 | } |
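/*
 * Illustrative sketch (not part of the original file): the sentinel set
 * above is consumed by later overflow checks, conceptually something like
 *
 *	if (*end_of_stack(tsk) != STACK_END_MAGIC)
 *		panic("corrupted stack end detected inside scheduler\n");
 *
 * The real check lives in the scheduler's debug path (with
 * CONFIG_SCHED_STACK_END_CHECK); this only shows how the magic value
 * would be used.
 */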
| 873 | |
| 874 | static struct task_struct *dup_task_struct(struct task_struct *orig, int node) |
| 875 | { |
| 876 | struct task_struct *tsk; |
| 877 | unsigned long *stack; |
YueHaibing | 0f4991e | 2018-12-28 00:40:00 -0800 | [diff] [blame] | 878 | struct vm_struct *stack_vm_area __maybe_unused; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 | int err; |
| 880 | |
| 881 | if (node == NUMA_NO_NODE) |
| 882 | node = tsk_fork_get_node(orig); |
| 883 | tsk = alloc_task_struct_node(node); |
| 884 | if (!tsk) |
| 885 | return NULL; |
| 886 | |
| 887 | stack = alloc_thread_stack_node(tsk, node); |
| 888 | if (!stack) |
| 889 | goto free_tsk; |
| 890 | |
Roman Gushchin | 9b6f7e1 | 2018-10-26 15:03:19 -0700 | [diff] [blame] | 891 | if (memcg_charge_kernel_stack(tsk)) |
| 892 | goto free_stack; |
| 893 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 894 | stack_vm_area = task_stack_vm_area(tsk); |
| 895 | |
| 896 | err = arch_dup_task_struct(tsk, orig); |
| 897 | |
| 898 | /* |
| 899 | * arch_dup_task_struct() clobbers the stack-related fields. Make |
| 900 | * sure they're properly initialized before using any stack-related |
| 901 | * functions again. |
| 902 | */ |
| 903 | tsk->stack = stack; |
| 904 | #ifdef CONFIG_VMAP_STACK |
| 905 | tsk->stack_vm_area = stack_vm_area; |
| 906 | #endif |
| 907 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
Elena Reshetova | f0b89d3 | 2019-01-18 14:27:30 +0200 | [diff] [blame] | 908 | refcount_set(&tsk->stack_refcount, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | #endif |
| 910 | |
| 911 | if (err) |
| 912 | goto free_stack; |
| 913 | |
Sami Tolvanen | d08b9f0 | 2020-04-27 09:00:07 -0700 | [diff] [blame] | 914 | err = scs_prepare(tsk, node); |
| 915 | if (err) |
| 916 | goto free_stack; |
| 917 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | #ifdef CONFIG_SECCOMP |
| 919 | /* |
| 920 | * We must handle setting up seccomp filters once we're under |
| 921 | * the sighand lock in case orig has changed between now and |
| 922 | * then. Until then, filter must be NULL to avoid messing up |
| 923 | * the usage counts on the error path calling free_task. |
| 924 | */ |
| 925 | tsk->seccomp.filter = NULL; |
| 926 | #endif |
| 927 | |
| 928 | setup_thread_stack(tsk, orig); |
| 929 | clear_user_return_notifier(tsk); |
| 930 | clear_tsk_need_resched(tsk); |
| 931 | set_task_stack_end_magic(tsk); |
Gabriel Krisman Bertazi | 1446e1d | 2020-11-27 14:32:34 -0500 | [diff] [blame] | 932 | clear_syscall_work_syscall_user_dispatch(tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | |
Linus Torvalds | 050e9ba | 2018-06-14 12:21:18 +0900 | [diff] [blame] | 934 | #ifdef CONFIG_STACKPROTECTOR |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | tsk->stack_canary = get_random_canary(); |
| 936 | #endif |
Sebastian Andrzej Siewior | 3bd3706 | 2019-04-23 16:26:36 +0200 | [diff] [blame] | 937 | if (orig->cpus_ptr == &orig->cpus_mask) |
| 938 | tsk->cpus_ptr = &tsk->cpus_mask; |
Will Deacon | b90ca8b | 2021-07-30 12:24:33 +0100 | [diff] [blame] | 939 | dup_user_cpus_ptr(tsk, orig, node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | |
| 941 | /* |
Eric W. Biederman | 0ff7b2c | 2019-09-14 07:33:58 -0500 | [diff] [blame] | 942 | * One for the user space visible state that goes away when reaped. |
| 943 | * One for the scheduler. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | */ |
Eric W. Biederman | 0ff7b2c | 2019-09-14 07:33:58 -0500 | [diff] [blame] | 945 | refcount_set(&tsk->rcu_users, 2); |
| 946 | /* One for the rcu users */ |
| 947 | refcount_set(&tsk->usage, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
| 949 | tsk->btrace_seq = 0; |
| 950 | #endif |
| 951 | tsk->splice_pipe = NULL; |
| 952 | tsk->task_frag.page = NULL; |
| 953 | tsk->wake_q.next = NULL; |
Eric W. Biederman | e32cf5d | 2021-12-22 22:10:09 -0600 | [diff] [blame] | 954 | tsk->worker_private = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | |
| 956 | account_kernel_stack(tsk, 1); |
| 957 | |
| 958 | kcov_task_init(tsk); |
Thomas Gleixner | 5fbda3e | 2020-11-18 20:48:43 +0100 | [diff] [blame] | 959 | kmap_local_fork(tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | |
| 961 | #ifdef CONFIG_FAULT_INJECTION |
| 962 | tsk->fail_nth = 0; |
| 963 | #endif |
| 964 | |
Josef Bacik | 2c32301 | 2018-07-31 12:39:04 -0400 | [diff] [blame] | 965 | #ifdef CONFIG_BLK_CGROUP |
| 966 | tsk->throttle_queue = NULL; |
| 967 | tsk->use_memdelay = 0; |
| 968 | #endif |
| 969 | |
Shakeel Butt | d46eb14b | 2018-08-17 15:46:39 -0700 | [diff] [blame] | 970 | #ifdef CONFIG_MEMCG |
| 971 | tsk->active_memcg = NULL; |
| 972 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | return tsk; |
| 974 | |
| 975 | free_stack: |
| 976 | free_thread_stack(tsk); |
| 977 | free_tsk: |
| 978 | free_task_struct(tsk); |
| 979 | return NULL; |
| 980 | } |
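/*
 * Design note (illustrative): the counters set up above split the task's
 * lifetime. rcu_users holds one reference for the userspace-visible state
 * and one for the scheduler; tsk->usage holds the reference owned by the
 * rcu users. When rcu_users drops to zero, an RCU callback puts that
 * final usage reference and the task can be freed.
 */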
| 981 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); |
| 983 | |
Hidehiro Kawai | 4cb0e11 | 2009-01-06 14:42:47 -0800 | [diff] [blame] | 984 | static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; |
| 985 | |
| 986 | static int __init coredump_filter_setup(char *s) |
| 987 | { |
| 988 | default_dump_filter = |
| 989 | (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & |
| 990 | MMF_DUMP_FILTER_MASK; |
| 991 | return 1; |
| 992 | } |
| 993 | |
| 994 | __setup("coredump_filter=", coredump_filter_setup); |
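/*
 * Usage note (illustrative, not part of the original file): the filter
 * can be set at boot, e.g. "coredump_filter=0x7" on the kernel command
 * line, or per process at runtime through procfs:
 *
 *	echo 0x7 > /proc/self/coredump_filter
 *
 * simple_strtoul() with base 0 accepts hex ("0x7"), octal and decimal.
 */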
| 995 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | #include <linux/init_task.h> |
| 997 | |
Alexey Dobriyan | 858f099 | 2009-09-23 15:57:32 -0700 | [diff] [blame] | 998 | static void mm_init_aio(struct mm_struct *mm) |
| 999 | { |
| 1000 | #ifdef CONFIG_AIO |
| 1001 | spin_lock_init(&mm->ioctx_lock); |
Benjamin LaHaise | db446a0 | 2013-07-30 12:54:40 -0400 | [diff] [blame] | 1002 | mm->ioctx_table = NULL; |
Alexey Dobriyan | 858f099 | 2009-09-23 15:57:32 -0700 | [diff] [blame] | 1003 | #endif |
| 1004 | } |
| 1005 | |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 1006 | static __always_inline void mm_clear_owner(struct mm_struct *mm, |
| 1007 | struct task_struct *p) |
| 1008 | { |
| 1009 | #ifdef CONFIG_MEMCG |
| 1010 | if (mm->owner == p) |
| 1011 | WRITE_ONCE(mm->owner, NULL); |
| 1012 | #endif |
| 1013 | } |
| 1014 | |
Vladimir Davydov | 33144e8 | 2014-08-08 14:22:03 -0700 | [diff] [blame] | 1015 | static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) |
| 1016 | { |
| 1017 | #ifdef CONFIG_MEMCG |
| 1018 | mm->owner = p; |
| 1019 | #endif |
| 1020 | } |
| 1021 | |
Fenghua Yu | 82e69a1 | 2021-03-12 21:07:15 -0800 | [diff] [blame] | 1022 | static void mm_init_pasid(struct mm_struct *mm) |
| 1023 | { |
| 1024 | #ifdef CONFIG_IOMMU_SUPPORT |
| 1025 | mm->pasid = INIT_PASID; |
| 1026 | #endif |
| 1027 | } |
| 1028 | |
Eric Biggers | 355627f | 2017-08-31 16:15:26 -0700 | [diff] [blame] | 1029 | static void mm_init_uprobes_state(struct mm_struct *mm) |
| 1030 | { |
| 1031 | #ifdef CONFIG_UPROBES |
| 1032 | mm->uprobes_state.xol_area = NULL; |
| 1033 | #endif |
| 1034 | } |
| 1035 | |
Eric W. Biederman | bfedb58 | 2016-10-13 21:23:16 -0500 | [diff] [blame] | 1036 | static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, |
| 1037 | struct user_namespace *user_ns) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | { |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1039 | mm->mmap = NULL; |
| 1040 | mm->mm_rb = RB_ROOT; |
| 1041 | mm->vmacache_seqnum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | atomic_set(&mm->mm_users, 1); |
| 1043 | atomic_set(&mm->mm_count, 1); |
Jason Gunthorpe | 57efa1f | 2020-12-14 19:05:44 -0800 | [diff] [blame] | 1044 | seqcount_init(&mm->write_protect_seq); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1045 | mmap_init_lock(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | INIT_LIST_HEAD(&mm->mmlist); |
Kirill A. Shutemov | af5b0f6 | 2017-11-15 17:35:40 -0800 | [diff] [blame] | 1047 | mm_pgtables_bytes_init(mm); |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1048 | mm->map_count = 0; |
| 1049 | mm->locked_vm = 0; |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 1050 | atomic64_set(&mm->pinned_vm, 0); |
KAMEZAWA Hiroyuki | d559db0 | 2010-03-05 13:41:39 -0800 | [diff] [blame] | 1051 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1052 | spin_lock_init(&mm->page_table_lock); |
Yang Shi | 88aa7cc | 2018-06-07 17:05:28 -0700 | [diff] [blame] | 1053 | spin_lock_init(&mm->arg_lock); |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1054 | mm_init_cpumask(mm); |
Alexey Dobriyan | 858f099 | 2009-09-23 15:57:32 -0700 | [diff] [blame] | 1055 | mm_init_aio(mm); |
Balbir Singh | cf475ad | 2008-04-29 01:00:16 -0700 | [diff] [blame] | 1056 | mm_init_owner(mm, p); |
Fenghua Yu | 82e69a1 | 2021-03-12 21:07:15 -0800 | [diff] [blame] | 1057 | mm_init_pasid(mm); |
Eric Biggers | 2b7e866 | 2017-08-25 15:55:43 -0700 | [diff] [blame] | 1058 | RCU_INIT_POINTER(mm->exe_file, NULL); |
Jason Gunthorpe | 984cfe4 | 2019-12-18 13:40:35 -0400 | [diff] [blame] | 1059 | mmu_notifier_subscriptions_init(mm); |
Nadav Amit | 16af97d | 2017-08-10 15:23:56 -0700 | [diff] [blame] | 1060 | init_tlb_flush_pending(mm); |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1061 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
| 1062 | mm->pmd_huge_pte = NULL; |
| 1063 | #endif |
Eric Biggers | 355627f | 2017-08-31 16:15:26 -0700 | [diff] [blame] | 1064 | mm_init_uprobes_state(mm); |
Liu Zixian | 13db8c5 | 2021-09-08 18:10:05 -0700 | [diff] [blame] | 1065 | hugetlb_count_init(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | |
Alex Thorlton | a0715cc | 2014-04-07 15:37:10 -0700 | [diff] [blame] | 1067 | if (current->mm) { |
| 1068 | mm->flags = current->mm->flags & MMF_INIT_MASK; |
| 1069 | mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; |
| 1070 | } else { |
| 1071 | mm->flags = default_dump_filter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | mm->def_flags = 0; |
Alex Thorlton | a0715cc | 2014-04-07 15:37:10 -0700 | [diff] [blame] | 1073 | } |
| 1074 | |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1075 | if (mm_alloc_pgd(mm)) |
| 1076 | goto fail_nopgd; |
Pavel Emelianov | 78fb746 | 2008-02-07 00:13:51 -0800 | [diff] [blame] | 1077 | |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1078 | if (init_new_context(p, mm)) |
| 1079 | goto fail_nocontext; |
| 1080 | |
Eric W. Biederman | bfedb58 | 2016-10-13 21:23:16 -0500 | [diff] [blame] | 1081 | mm->user_ns = get_user_ns(user_ns); |
Vladimir Davydov | 41f727f | 2014-08-08 14:21:56 -0700 | [diff] [blame] | 1082 | return mm; |
| 1083 | |
| 1084 | fail_nocontext: |
| 1085 | mm_free_pgd(mm); |
| 1086 | fail_nopgd: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | free_mm(mm); |
| 1088 | return NULL; |
| 1089 | } |
| 1090 | |
| 1091 | /* |
| 1092 | * Allocate and initialize an mm_struct. |
| 1093 | */ |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1094 | struct mm_struct *mm_alloc(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | { |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1096 | struct mm_struct *mm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | |
| 1098 | mm = allocate_mm(); |
KOSAKI Motohiro | de03c72 | 2011-05-24 17:12:15 -0700 | [diff] [blame] | 1099 | if (!mm) |
| 1100 | return NULL; |
| 1101 | |
| 1102 | memset(mm, 0, sizeof(*mm)); |
Eric W. Biederman | bfedb58 | 2016-10-13 21:23:16 -0500 | [diff] [blame] | 1103 | return mm_init(mm, current, current_user_ns()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | } |
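/*
 * Illustrative caller pattern (not from the original file), e.g. for
 * exec-style code that builds a fresh address space:
 *
 *	struct mm_struct *mm = mm_alloc();
 *	if (!mm)
 *		return -ENOMEM;
 *	...			// populate the new mm
 *	mmput(mm);		// on the error path
 */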
| 1105 | |
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 1106 | static inline void __mmput(struct mm_struct *mm) |
| 1107 | { |
| 1108 | VM_BUG_ON(atomic_read(&mm->mm_users)); |
| 1109 | |
| 1110 | uprobe_clear_state(mm); |
| 1111 | exit_aio(mm); |
| 1112 | ksm_exit(mm); |
| 1113 | khugepaged_exit(mm); /* must run before exit_mmap */ |
| 1114 | exit_mmap(mm); |
Aaron Lu | 6fcb52a | 2016-10-07 17:00:08 -0700 | [diff] [blame] | 1115 | mm_put_huge_zero_page(mm); |
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 1116 | set_mm_exe_file(mm, NULL); |
| 1117 | if (!list_empty(&mm->mmlist)) { |
| 1118 | spin_lock(&mmlist_lock); |
| 1119 | list_del(&mm->mmlist); |
| 1120 | spin_unlock(&mmlist_lock); |
| 1121 | } |
| 1122 | if (mm->binfmt) |
| 1123 | module_put(mm->binfmt->module); |
| 1124 | mmdrop(mm); |
| 1125 | } |
| 1126 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | /* |
| 1128 | * Decrement the use count and release all resources for an mm. |
| 1129 | */ |
| 1130 | void mmput(struct mm_struct *mm) |
| 1131 | { |
Andrew Morton | 0ae26f1 | 2006-06-23 02:05:15 -0700 | [diff] [blame] | 1132 | might_sleep(); |
| 1133 | |
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 1134 | if (atomic_dec_and_test(&mm->mm_users)) |
| 1135 | __mmput(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | } |
| 1137 | EXPORT_SYMBOL_GPL(mmput); |
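/*
 * Illustrative pairing (not part of the original file): every mm_users
 * reference taken with mmget() must be dropped with mmput(), and the
 * final mmput() may sleep:
 *
 *	mmget(mm);		// pin the address space
 *	...			// use mm; may fault, may block
 *	mmput(mm);		// tears everything down if last user
 */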
| 1138 | |
Sherry Yang | a1b2289 | 2017-10-03 16:15:00 -0700 | [diff] [blame] | 1139 | #ifdef CONFIG_MMU |
| 1140 | static void mmput_async_fn(struct work_struct *work) |
| 1141 | { |
| 1142 | struct mm_struct *mm = container_of(work, struct mm_struct, |
| 1143 | async_put_work); |
| 1144 | |
| 1145 | __mmput(mm); |
| 1146 | } |
| 1147 | |
| 1148 | void mmput_async(struct mm_struct *mm) |
| 1149 | { |
| 1150 | if (atomic_dec_and_test(&mm->mm_users)) { |
| 1151 | INIT_WORK(&mm->async_put_work, mmput_async_fn); |
| 1152 | schedule_work(&mm->async_put_work); |
| 1153 | } |
| 1154 | } |
| 1155 | #endif |
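/*
 * Design note (illustrative): mmput_async() is for callers that cannot
 * sleep; if they drop the last reference, __mmput() runs later from a
 * workqueue instead of in the caller's context. A sketch, where
 * "some_lock" is hypothetical:
 *
 *	spin_lock(&some_lock);
 *	...
 *	mmput_async(mm);	// never calls __mmput() synchronously
 *	spin_unlock(&some_lock);
 */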
| 1156 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1157 | /** |
| 1158 | * set_mm_exe_file - change a reference to the mm's executable file |
| 1159 | * |
| 1160 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). |
| 1161 | * |
Davidlohr Bueso | 6e399cd | 2015-04-16 12:47:59 -0700 | [diff] [blame] | 1162 | * Main users are mmput() and sys_execve(). Callers prevent concurrent
| 1163 | * invocations: in mmput() nobody is left alive, in execve the task is
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1164 | * single-threaded.
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1165 | * |
| 1166 | * Can only fail if new_exe_file != NULL. |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1167 | */ |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1168 | int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1169 | { |
Davidlohr Bueso | 6e399cd | 2015-04-16 12:47:59 -0700 | [diff] [blame] | 1170 | struct file *old_exe_file; |
| 1171 | |
| 1172 | /* |
| 1173 | * It is safe to dereference the exe_file without RCU as |
| 1174 | * this function is only called if nobody else can access |
| 1175 | * this mm -- see comment above for justification. |
| 1176 | */ |
| 1177 | old_exe_file = rcu_dereference_raw(mm->exe_file); |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1178 | |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1179 | if (new_exe_file) { |
| 1180 | /* |
| 1181 | * We expect the caller (i.e., sys_execve) to have already denied
| 1182 | * write access, so this is unlikely to fail.
| 1183 | */ |
| 1184 | if (unlikely(deny_write_access(new_exe_file))) |
| 1185 | return -EACCES; |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1186 | get_file(new_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1187 | } |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1188 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1189 | if (old_exe_file) { |
| 1190 | allow_write_access(old_exe_file); |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1191 | fput(old_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1192 | } |
| 1193 | return 0; |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1194 | } |
| 1195 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1196 | /** |
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1197 | * replace_mm_exe_file - replace a reference to the mm's executable file |
| 1198 | * |
| 1199 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe), |
| 1200 | * dealing with concurrent invocation and without grabbing the mmap lock in |
| 1201 | * write mode. |
| 1202 | * |
| 1203 | * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). |
| 1204 | */ |
| 1205 | int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
| 1206 | { |
| 1207 | struct vm_area_struct *vma; |
| 1208 | struct file *old_exe_file; |
| 1209 | int ret = 0; |
| 1210 | |
| 1211 | /* Forbid mm->exe_file change if old file still mapped. */ |
| 1212 | old_exe_file = get_mm_exe_file(mm); |
| 1213 | if (old_exe_file) { |
| 1214 | mmap_read_lock(mm); |
| 1215 | for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) { |
| 1216 | if (!vma->vm_file) |
| 1217 | continue; |
| 1218 | if (path_equal(&vma->vm_file->f_path, |
| 1219 | &old_exe_file->f_path)) |
| 1220 | ret = -EBUSY; |
| 1221 | } |
| 1222 | mmap_read_unlock(mm); |
| 1223 | fput(old_exe_file); |
| 1224 | if (ret) |
| 1225 | return ret; |
| 1226 | } |
| 1227 | |
| 1228 | /* set the new file, lockless */ |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1229 | ret = deny_write_access(new_exe_file); |
| 1230 | if (ret) |
| 1231 | return -EACCES; |
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1232 | get_file(new_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1233 | |
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1234 | old_exe_file = xchg(&mm->exe_file, new_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1235 | if (old_exe_file) { |
| 1236 | /* |
| 1237 | * Don't race with dup_mmap() getting the file and disallowing |
| 1238 | * write access while someone might open the file writable. |
| 1239 | */ |
| 1240 | mmap_read_lock(mm); |
| 1241 | allow_write_access(old_exe_file); |
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1242 | fput(old_exe_file); |
David Hildenbrand | fe69d56 | 2021-04-23 10:29:59 +0200 | [diff] [blame] | 1243 | mmap_read_unlock(mm); |
| 1244 | } |
David Hildenbrand | 35d7bdc | 2021-04-23 10:20:25 +0200 | [diff] [blame] | 1245 | return 0; |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1246 | } |
| 1247 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1248 | /** |
| 1249 | * get_mm_exe_file - acquire a reference to the mm's executable file |
| 1250 | * |
| 1251 | * Returns %NULL if mm has no associated executable file. |
| 1252 | * User must release file via fput(). |
| 1253 | */ |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1254 | struct file *get_mm_exe_file(struct mm_struct *mm) |
| 1255 | { |
| 1256 | struct file *exe_file; |
| 1257 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1258 | rcu_read_lock(); |
| 1259 | exe_file = rcu_dereference(mm->exe_file); |
| 1260 | if (exe_file && !get_file_rcu(exe_file)) |
| 1261 | exe_file = NULL; |
| 1262 | rcu_read_unlock(); |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1263 | return exe_file; |
| 1264 | } |
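/*
 * Illustrative caller (not from the original file), e.g. a procfs
 * handler resolving /proc/[pid]/exe:
 *
 *	struct file *exe_file = get_mm_exe_file(mm);
 *	if (exe_file) {
 *		...			// use exe_file->f_path
 *		fput(exe_file);		// drop the reference
 *	}
 */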
| 1265 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | /** |
Mateusz Guzik | cd81a917 | 2016-08-23 16:20:38 +0200 | [diff] [blame] | 1267 | * get_task_exe_file - acquire a reference to the task's executable file |
| 1268 | * |
| 1269 | * Returns %NULL if task's mm (if any) has no associated executable file or |
| 1270 | * this is a kernel thread with borrowed mm (see the comment above get_task_mm). |
| 1271 | * User must release file via fput(). |
| 1272 | */ |
| 1273 | struct file *get_task_exe_file(struct task_struct *task) |
| 1274 | { |
| 1275 | struct file *exe_file = NULL; |
| 1276 | struct mm_struct *mm; |
| 1277 | |
| 1278 | task_lock(task); |
| 1279 | mm = task->mm; |
| 1280 | if (mm) { |
| 1281 | if (!(task->flags & PF_KTHREAD)) |
| 1282 | exe_file = get_mm_exe_file(mm); |
| 1283 | } |
| 1284 | task_unlock(task); |
| 1285 | return exe_file; |
| 1286 | } |
Mateusz Guzik | cd81a917 | 2016-08-23 16:20:38 +0200 | [diff] [blame] | 1287 | |
| 1288 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | * get_task_mm - acquire a reference to the task's mm |
| 1290 | * |
Oleg Nesterov | 246bb0b | 2008-07-25 01:47:38 -0700 | [diff] [blame] | 1291 | * Returns %NULL if the task has no mm. Checks that PF_KTHREAD (meaning
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | * this kernel worker thread has transiently adopted a user mm with use_mm,
| 1293 | * e.g. to do its AIO) is not set, and if so returns a reference to the mm
| 1294 | * after bumping up the use count. The user must release the mm via mmput()
| 1295 | * after use. Typically used by /proc and ptrace.
| 1296 | */ |
| 1297 | struct mm_struct *get_task_mm(struct task_struct *task) |
| 1298 | { |
| 1299 | struct mm_struct *mm; |
| 1300 | |
| 1301 | task_lock(task); |
| 1302 | mm = task->mm; |
| 1303 | if (mm) { |
Oleg Nesterov | 246bb0b | 2008-07-25 01:47:38 -0700 | [diff] [blame] | 1304 | if (task->flags & PF_KTHREAD) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | mm = NULL; |
| 1306 | else |
Vegard Nossum | 3fce371 | 2017-02-27 14:30:10 -0800 | [diff] [blame] | 1307 | mmget(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | } |
| 1309 | task_unlock(task); |
| 1310 | return mm; |
| 1311 | } |
| 1312 | EXPORT_SYMBOL_GPL(get_task_mm); |
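/*
 * Illustrative caller pattern (not part of the original file), e.g.
 * from a /proc handler:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		...			// walk mm->mmap etc.
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 */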
| 1313 | |
Christopher Yeoh | 8cdb878 | 2012-02-02 11:34:09 +1030 | [diff] [blame] | 1314 | struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
| 1315 | { |
| 1316 | struct mm_struct *mm; |
| 1317 | int err; |
| 1318 | |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 1319 | err = down_read_killable(&task->signal->exec_update_lock); |
Christopher Yeoh | 8cdb878 | 2012-02-02 11:34:09 +1030 | [diff] [blame] | 1320 | if (err) |
| 1321 | return ERR_PTR(err); |
| 1322 | |
| 1323 | mm = get_task_mm(task); |
| 1324 | if (mm && mm != current->mm && |
| 1325 | !ptrace_may_access(task, mode)) { |
| 1326 | mmput(mm); |
| 1327 | mm = ERR_PTR(-EACCES); |
| 1328 | } |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 1329 | up_read(&task->signal->exec_update_lock); |
Christopher Yeoh | 8cdb878 | 2012-02-02 11:34:09 +1030 | [diff] [blame] | 1330 | |
| 1331 | return mm; |
| 1332 | } |
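/*
 * Illustrative use (not from the original file), in the style of the
 * /proc/[pid]/mem handlers:
 *
 *	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *	if (IS_ERR_OR_NULL(mm))
 *		return mm ? PTR_ERR(mm) : -ESRCH;
 *
 * i.e. an ERR_PTR() means the access check or the killable lock failed,
 * while NULL simply means the task has no mm.
 */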
| 1333 | |
Oleg Nesterov | 57b59c4 | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1334 | static void complete_vfork_done(struct task_struct *tsk) |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1335 | { |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1336 | struct completion *vfork; |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1337 | |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1338 | task_lock(tsk); |
| 1339 | vfork = tsk->vfork_done; |
| 1340 | if (likely(vfork)) { |
| 1341 | tsk->vfork_done = NULL; |
| 1342 | complete(vfork); |
| 1343 | } |
| 1344 | task_unlock(tsk); |
| 1345 | } |
| 1346 | |
| 1347 | static int wait_for_vfork_done(struct task_struct *child, |
| 1348 | struct completion *vfork) |
| 1349 | { |
| 1350 | int killed; |
| 1351 | |
| 1352 | freezer_do_not_count(); |
Roman Gushchin | 76f969e | 2019-04-19 10:03:04 -0700 | [diff] [blame] | 1353 | cgroup_enter_frozen(); |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1354 | killed = wait_for_completion_killable(vfork); |
Roman Gushchin | 76f969e | 2019-04-19 10:03:04 -0700 | [diff] [blame] | 1355 | cgroup_leave_frozen(false); |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1356 | freezer_count(); |
| 1357 | |
| 1358 | if (killed) { |
| 1359 | task_lock(child); |
| 1360 | child->vfork_done = NULL; |
| 1361 | task_unlock(child); |
| 1362 | } |
| 1363 | |
| 1364 | put_task_struct(child); |
| 1365 | return killed; |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1366 | } |
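/*
 * Illustrative sequence (not part of the original file) for the vfork()
 * handshake built on the two helpers above:
 *
 *	parent: declares a completion, points child->vfork_done at it
 *	parent: wait_for_vfork_done(child, &vfork);	// sleeps, killable
 *	child : exec or exit -> mm_release() -> complete_vfork_done()
 *	parent: wakes up; its address space is safe to use again
 */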
| 1367 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | /* Please note the differences between mmput and mm_release. |
| 1369 | * mmput is called whenever we stop holding onto a mm_struct, |
| 1370 | * on error, success, whatever.
| 1371 | * |
| 1372 | * mm_release is called after a mm_struct has been removed |
| 1373 | * from the current process. |
| 1374 | * |
| 1375 | * This difference is important for error handling, when we |
| 1376 | * only half set up a mm_struct for a new process and need to restore |
| 1377 | * the old one, because we mmput the new mm_struct before
| 1378 | * restoring the old one...
| 1379 | * Eric Biederman 10 January 1998 |
| 1380 | */ |
Thomas Gleixner | 4610ba7 | 2019-11-06 22:55:38 +0100 | [diff] [blame] | 1381 | static void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | { |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1383 | uprobe_free_utask(tsk); |
| 1384 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 | /* Get rid of any cached register state */ |
| 1386 | deactivate_mm(tsk, mm); |
| 1387 | |
Roland McGrath | fec1d01 | 2006-12-06 20:36:34 -0800 | [diff] [blame] | 1388 | /* |
Michal Hocko | 735f277 | 2016-09-01 16:15:13 -0700 | [diff] [blame] | 1389 | * Signal userspace if we're not exiting with a core dump |
| 1390 | * because we want to leave the value intact for debugging |
| 1391 | * purposes. |
Roland McGrath | fec1d01 | 2006-12-06 20:36:34 -0800 | [diff] [blame] | 1392 | */ |
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1393 | if (tsk->clear_child_tid) { |
Eric W. Biederman | 9230738 | 2021-09-01 11:33:50 -0500 | [diff] [blame] | 1394 | if (atomic_read(&mm->mm_users) > 1) { |
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1395 | /* |
| 1396 | * We don't check the error code - if userspace has |
| 1397 | * not set up a proper pointer then tough luck. |
| 1398 | */ |
| 1399 | put_user(0, tsk->clear_child_tid); |
Dominik Brodowski | 2de0db9 | 2018-03-11 11:34:26 +0100 | [diff] [blame] | 1400 | do_futex(tsk->clear_child_tid, FUTEX_WAKE, |
| 1401 | 1, NULL, NULL, 0, 0); |
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1402 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | tsk->clear_child_tid = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | } |
Konstantin Khlebnikov | f7505d64 | 2012-05-31 16:26:21 -0700 | [diff] [blame] | 1405 | |
| 1406 | /* |
| 1407 | * All done; finally we can wake up the parent and return this mm to it.
| 1408 | * Also kthread_stop() uses this completion for synchronization. |
| 1409 | */ |
| 1410 | if (tsk->vfork_done) |
| 1411 | complete_vfork_done(tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | } |
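/*
 * Usage note (illustrative): the clear_child_tid handling above is what
 * implements CLONE_CHILD_CLEARTID; e.g. a thread library can join by
 * futex-waiting on the tid word the kernel zeroes and wakes here:
 *
 *	// userspace sketch, error handling omitted
 *	while (tid != 0)
 *		syscall(SYS_futex, &tid, FUTEX_WAIT, tid, NULL, NULL, 0);
 */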
| 1413 | |
Thomas Gleixner | 4610ba7 | 2019-11-06 22:55:38 +0100 | [diff] [blame] | 1414 | void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
| 1415 | { |
Thomas Gleixner | 150d715 | 2019-11-06 22:55:39 +0100 | [diff] [blame] | 1416 | futex_exit_release(tsk); |
Thomas Gleixner | 4610ba7 | 2019-11-06 22:55:38 +0100 | [diff] [blame] | 1417 | mm_release(tsk, mm); |
| 1418 | } |
| 1419 | |
| 1420 | void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
| 1421 | { |
Thomas Gleixner | 150d715 | 2019-11-06 22:55:39 +0100 | [diff] [blame] | 1422 | futex_exec_release(tsk); |
Thomas Gleixner | 4610ba7 | 2019-11-06 22:55:38 +0100 | [diff] [blame] | 1423 | mm_release(tsk, mm); |
| 1424 | } |
| 1425 | |
Nadav Amit | 13585fa | 2019-04-25 17:11:25 -0700 | [diff] [blame] | 1426 | /** |
| 1427 | * dup_mm() - duplicates an existing mm structure |
| 1428 | * @tsk: the task_struct with which the new mm will be associated. |
| 1429 | * @oldmm: the mm to duplicate. |
| 1430 | * |
| 1431 | * Allocates a new mm structure and duplicates the provided @oldmm structure |
| 1432 | * content into it. |
| 1433 | * |
| 1434 | * Return: the duplicated mm or NULL on failure. |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1435 | */ |
Nadav Amit | 13585fa | 2019-04-25 17:11:25 -0700 | [diff] [blame] | 1436 | static struct mm_struct *dup_mm(struct task_struct *tsk, |
| 1437 | struct mm_struct *oldmm) |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1438 | { |
Nadav Amit | 13585fa | 2019-04-25 17:11:25 -0700 | [diff] [blame] | 1439 | struct mm_struct *mm; |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1440 | int err; |
| 1441 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1442 | mm = allocate_mm(); |
| 1443 | if (!mm) |
| 1444 | goto fail_nomem; |
| 1445 | |
| 1446 | memcpy(mm, oldmm, sizeof(*mm)); |
| 1447 | |
Eric W. Biederman | bfedb58 | 2016-10-13 21:23:16 -0500 | [diff] [blame] | 1448 | if (!mm_init(mm, tsk, mm->user_ns)) |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1449 | goto fail_nomem; |
| 1450 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1451 | err = dup_mmap(mm, oldmm); |
| 1452 | if (err) |
| 1453 | goto free_pt; |
| 1454 | |
| 1455 | mm->hiwater_rss = get_mm_rss(mm); |
| 1456 | mm->hiwater_vm = mm->total_vm; |
| 1457 | |
Hiroshi Shimamoto | 801460d | 2009-09-23 15:57:41 -0700 | [diff] [blame] | 1458 | if (mm->binfmt && !try_module_get(mm->binfmt->module)) |
| 1459 | goto free_pt; |
| 1460 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1461 | return mm; |
| 1462 | |
| 1463 | free_pt: |
Hiroshi Shimamoto | 801460d | 2009-09-23 15:57:41 -0700 | [diff] [blame] | 1464 | /* don't put binfmt in mmput, we haven't got module yet */ |
| 1465 | mm->binfmt = NULL; |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 1466 | mm_init_owner(mm, NULL); |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1467 | mmput(mm); |
| 1468 | |
| 1469 | fail_nomem: |
| 1470 | return NULL; |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1471 | } |
| 1472 | |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1473 | static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1474 | { |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1475 | struct mm_struct *mm, *oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | |
| 1477 | tsk->min_flt = tsk->maj_flt = 0; |
| 1478 | tsk->nvcsw = tsk->nivcsw = 0; |
Mandeep Singh Baines | 17406b8 | 2009-02-06 15:37:47 -0800 | [diff] [blame] | 1479 | #ifdef CONFIG_DETECT_HUNG_TASK |
| 1480 | tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; |
Dmitry Vyukov | a2e5144 | 2018-08-21 21:55:52 -0700 | [diff] [blame] | 1481 | tsk->last_switch_time = 0; |
Mandeep Singh Baines | 17406b8 | 2009-02-06 15:37:47 -0800 | [diff] [blame] | 1482 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | |
| 1484 | tsk->mm = NULL; |
| 1485 | tsk->active_mm = NULL; |
| 1486 | |
| 1487 | /* |
| 1488 | * Are we cloning a kernel thread? |
| 1489 | * |
| 1490 | * We need to steal an active VM for that.
| 1491 | */ |
| 1492 | oldmm = current->mm; |
| 1493 | if (!oldmm) |
| 1494 | return 0; |
| 1495 | |
Davidlohr Bueso | 615d6e8 | 2014-04-07 15:37:25 -0700 | [diff] [blame] | 1496 | /* initialize the new vmacache entries */ |
| 1497 | vmacache_flush(tsk); |
| 1498 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | if (clone_flags & CLONE_VM) { |
Vegard Nossum | 3fce371 | 2017-02-27 14:30:10 -0800 | [diff] [blame] | 1500 | mmget(oldmm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | mm = oldmm; |
Rolf Eike Beer | a689539 | 2021-05-06 18:04:25 -0700 | [diff] [blame] | 1502 | } else { |
| 1503 | mm = dup_mm(tsk, current->mm); |
| 1504 | if (!mm) |
| 1505 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | } |
| 1507 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | tsk->mm = mm; |
| 1509 | tsk->active_mm = mm; |
| 1510 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | } |
| 1512 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1513 | static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | { |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1515 | struct fs_struct *fs = current->fs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1516 | if (clone_flags & CLONE_FS) { |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1517 | /* tsk->fs is already what we want */ |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1518 | spin_lock(&fs->lock); |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1519 | if (fs->in_exec) { |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1520 | spin_unlock(&fs->lock); |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1521 | return -EAGAIN; |
| 1522 | } |
| 1523 | fs->users++; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1524 | spin_unlock(&fs->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | return 0; |
| 1526 | } |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1527 | tsk->fs = copy_fs_struct(fs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | if (!tsk->fs) |
| 1529 | return -ENOMEM; |
| 1530 | return 0; |
| 1531 | } |
| 1532 | |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1533 | static int copy_files(unsigned long clone_flags, struct task_struct *tsk) |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1534 | { |
| 1535 | struct files_struct *oldf, *newf; |
| 1536 | int error = 0; |
| 1537 | |
| 1538 | /* |
| 1539 | * A background process may not have any files ... |
| 1540 | */ |
| 1541 | oldf = current->files; |
| 1542 | if (!oldf) |
| 1543 | goto out; |
| 1544 | |
| 1545 | if (clone_flags & CLONE_FILES) { |
| 1546 | atomic_inc(&oldf->count); |
| 1547 | goto out; |
| 1548 | } |
| 1549 | |
Christian Brauner | 60997c3 | 2020-06-03 21:48:55 +0200 | [diff] [blame] | 1550 | newf = dup_fd(oldf, NR_OPEN_MAX, &error); |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1551 | if (!newf) |
| 1552 | goto out; |
| 1553 | |
| 1554 | tsk->files = newf; |
| 1555 | error = 0; |
| 1556 | out: |
| 1557 | return error; |
| 1558 | } |
| 1559 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1560 | static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1561 | { |
| 1562 | struct sighand_struct *sig; |
| 1563 | |
Zhaolei | 6034880 | 2009-01-06 14:40:46 -0800 | [diff] [blame] | 1564 | if (clone_flags & CLONE_SIGHAND) { |
Elena Reshetova | d036bda | 2019-01-18 14:27:26 +0200 | [diff] [blame] | 1565 | refcount_inc(¤t->sighand->count); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | return 0; |
| 1567 | } |
| 1568 | sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); |
Madhuparna Bhowmik | 0c282b0 | 2020-01-27 23:28:21 +0530 | [diff] [blame] | 1569 | RCU_INIT_POINTER(tsk->sighand, sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | if (!sig) |
| 1571 | return -ENOMEM; |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 1572 | |
Elena Reshetova | d036bda | 2019-01-18 14:27:26 +0200 | [diff] [blame] | 1573 | refcount_set(&sig->count, 1); |
Jann Horn | 06e62a4 | 2018-08-21 22:00:58 -0700 | [diff] [blame] | 1574 | spin_lock_irq(¤t->sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1575 | memcpy(sig->action, current->sighand->action, sizeof(sig->action)); |
Jann Horn | 06e62a4 | 2018-08-21 22:00:58 -0700 | [diff] [blame] | 1576 | spin_unlock_irq(¤t->sighand->siglock); |
Christian Brauner | b612e5d | 2019-10-14 12:45:37 +0200 | [diff] [blame] | 1577 | |
| 1578 | /* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
| 1579 | if (clone_flags & CLONE_CLEAR_SIGHAND) |
| 1580 | flush_signal_handlers(tsk, 0); |
| 1581 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | return 0; |
| 1583 | } |
| 1584 | |
Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 1585 | void __cleanup_sighand(struct sighand_struct *sighand) |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1586 | { |
Elena Reshetova | d036bda | 2019-01-18 14:27:26 +0200 | [diff] [blame] | 1587 | if (refcount_dec_and_test(&sighand->count)) { |
Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 1588 | signalfd_cleanup(sighand); |
Oleg Nesterov | 392809b | 2014-09-28 23:44:18 +0200 | [diff] [blame] | 1589 | /* |
Paul E. McKenney | 5f0d5a3 | 2017-01-18 02:53:44 -0800 | [diff] [blame] | 1590 | * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it |
Oleg Nesterov | 392809b | 2014-09-28 23:44:18 +0200 | [diff] [blame] | 1591 | * without an RCU grace period, see __lock_task_sighand(). |
| 1592 | */ |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1593 | kmem_cache_free(sighand_cachep, sighand); |
Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 1594 | } |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1595 | } |
| 1596 | |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1597 | /* |
| 1598 | * Initialize POSIX timer handling for a thread group. |
| 1599 | */ |
| 1600 | static void posix_cpu_timers_init_group(struct signal_struct *sig) |
| 1601 | { |
Thomas Gleixner | 2b69942 | 2019-08-21 21:09:04 +0200 | [diff] [blame] | 1602 | struct posix_cputimers *pct = &sig->posix_cputimers; |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1603 | unsigned long cpu_limit; |
| 1604 | |
Jason Low | 316c1608d | 2015-04-28 13:00:20 -0700 | [diff] [blame] | 1605 | cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); |
Thomas Gleixner | 3a245c0 | 2019-08-21 21:09:06 +0200 | [diff] [blame] | 1606 | posix_cputimers_group_init(pct, cpu_limit); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1607 | } |
| 1608 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1609 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 | { |
| 1611 | struct signal_struct *sig; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | |
Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 1613 | if (clone_flags & CLONE_THREAD) |
Peter Zijlstra | 490dea4 | 2008-11-24 17:06:57 +0100 | [diff] [blame] | 1614 | return 0; |
Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1615 | |
Veaceslav Falico | a56704e | 2010-03-10 15:23:01 -0800 | [diff] [blame] | 1616 | sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | tsk->signal = sig; |
| 1618 | if (!sig) |
| 1619 | return -ENOMEM; |
| 1620 | |
Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1621 | sig->nr_threads = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | atomic_set(&sig->live, 1); |
Elena Reshetova | 60d4de3 | 2019-01-18 14:27:27 +0200 | [diff] [blame] | 1623 | refcount_set(&sig->sigcnt, 1); |
Oleg Nesterov | 0c740d0 | 2014-01-21 15:49:56 -0800 | [diff] [blame] | 1624 | |
| 1625 | /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ |
| 1626 | sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); |
| 1627 | tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); |
| 1628 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | init_waitqueue_head(&sig->wait_chldexit); |
Oleg Nesterov | db51aec | 2008-04-30 00:52:52 -0700 | [diff] [blame] | 1630 | sig->curr_target = tsk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | init_sigpending(&sig->shared_pending); |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 1632 | INIT_HLIST_HEAD(&sig->multiprocess); |
Rik van Riel | e78c349 | 2014-08-16 13:40:10 -0400 | [diff] [blame] | 1633 | seqlock_init(&sig->stats_lock); |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 1634 | prev_cputime_init(&sig->prev_cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1635 | |
Nicolas Pitre | baa73d9 | 2016-11-11 00:10:10 -0500 | [diff] [blame] | 1636 | #ifdef CONFIG_POSIX_TIMERS |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1637 | INIT_LIST_HEAD(&sig->posix_timers); |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1638 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | sig->real_timer.function = it_real_fn; |
Nicolas Pitre | baa73d9 | 2016-11-11 00:10:10 -0500 | [diff] [blame] | 1640 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | task_lock(current->group_leader); |
| 1643 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); |
| 1644 | task_unlock(current->group_leader); |
| 1645 | |
Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1646 | posix_cpu_timers_init_group(sig); |
| 1647 | |
Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1648 | tty_audit_fork(sig); |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 1649 | sched_autogroup_fork(sig); |
Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1650 | |
David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 1651 | sig->oom_score_adj = current->signal->oom_score_adj; |
Mandeep Singh Baines | dabb16f63 | 2011-01-13 15:46:05 -0800 | [diff] [blame] | 1652 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
KOSAKI Motohiro | 28b83c5 | 2009-09-21 17:03:13 -0700 | [diff] [blame] | 1653 | |
KOSAKI Motohiro | 9b1bf12 | 2010-10-27 15:34:08 -0700 | [diff] [blame] | 1654 | mutex_init(&sig->cred_guard_mutex); |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 1655 | init_rwsem(&sig->exec_update_lock); |
KOSAKI Motohiro | 9b1bf12 | 2010-10-27 15:34:08 -0700 | [diff] [blame] | 1656 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | return 0; |
| 1658 | } |
| 1659 | |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1660 | static void copy_seccomp(struct task_struct *p) |
| 1661 | { |
| 1662 | #ifdef CONFIG_SECCOMP |
| 1663 | /* |
| 1664 | * Must be called with sighand->siglock held, which is common to
| 1665 | * all threads in the group. Holding cred_guard_mutex is not |
| 1666 | * needed because this new task is not yet running and cannot |
| 1667 | * be racing exec. |
| 1668 | */ |
Guenter Roeck | 69f6a34 | 2014-08-10 20:50:30 -0700 | [diff] [blame] | 1669 | assert_spin_locked(¤t->sighand->siglock); |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1670 | |
| 1671 | /* Ref-count the new filter user, and assign it. */ |
| 1672 | get_seccomp_filter(current); |
| 1673 | p->seccomp = current->seccomp; |
| 1674 | |
| 1675 | /* |
| 1676 | * Explicitly enable no_new_privs here in case it got set |
| 1677 | * between the task_struct being duplicated and holding the |
| 1678 | * sighand lock. The seccomp state and nnp must be in sync. |
| 1679 | */ |
| 1680 | if (task_no_new_privs(current)) |
| 1681 | task_set_no_new_privs(p); |
| 1682 | |
| 1683 | /* |
| 1684 | * If the parent gained a seccomp mode after copying thread |
| 1685 | * flags but before we held the sighand lock, we have
| 1686 | * to manually enable the seccomp thread flag here. |
| 1687 | */ |
| 1688 | if (p->seccomp.mode != SECCOMP_MODE_DISABLED) |
Gabriel Krisman Bertazi | 23d67a5 | 2020-11-16 12:42:00 -0500 | [diff] [blame] | 1689 | set_task_syscall_work(p, SECCOMP); |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1690 | #endif |
| 1691 | } |
| 1692 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 1693 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1694 | { |
| 1695 | current->clear_child_tid = tidptr; |
| 1696 | |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1697 | return task_pid_vnr(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | } |
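/*
 * Illustrative userspace use (not part of the original file):
 *
 *	pid_t ret = syscall(SYS_set_tid_address, &tid_location);
 *
 * returns the caller's thread id; at thread exit the kernel clears
 * tid_location and does a FUTEX_WAKE on it (see mm_release() above).
 */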
| 1699 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1700 | static void rt_mutex_init_task(struct task_struct *p) |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1701 | { |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1702 | raw_spin_lock_init(&p->pi_lock); |
Zilvinas Valinskas | e29e175 | 2007-03-16 13:38:34 -0800 | [diff] [blame] | 1703 | #ifdef CONFIG_RT_MUTEXES |
Davidlohr Bueso | a23ba90 | 2017-09-08 16:15:01 -0700 | [diff] [blame] | 1704 | p->pi_waiters = RB_ROOT_CACHED; |
Xunlei Pang | e96a7705 | 2017-03-23 15:56:08 +0100 | [diff] [blame] | 1705 | p->pi_top_task = NULL; |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1706 | p->pi_blocked_on = NULL; |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1707 | #endif |
| 1708 | } |
| 1709 | |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1710 | static inline void init_task_pid_links(struct task_struct *task) |
| 1711 | { |
| 1712 | enum pid_type type; |
| 1713 | |
Alexander Guril | 96e1e98 | 2020-12-26 12:40:21 +0100 | [diff] [blame] | 1714 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1715 | INIT_HLIST_NODE(&task->pid_links[type]); |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1716 | } |
| 1717 | |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1718 | static inline void |
| 1719 | init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) |
| 1720 | { |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1721 | if (type == PIDTYPE_PID) |
| 1722 | task->thread_pid = pid; |
| 1723 | else |
| 1724 | task->signal->pids[type] = pid; |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1725 | } |
| 1726 | |
Ingo Molnar | 6bfbaa5 | 2017-02-03 21:37:49 +0100 | [diff] [blame] | 1727 | static inline void rcu_copy_process(struct task_struct *p) |
| 1728 | { |
| 1729 | #ifdef CONFIG_PREEMPT_RCU |
| 1730 | p->rcu_read_lock_nesting = 0; |
| 1731 | p->rcu_read_unlock_special.s = 0; |
| 1732 | p->rcu_blocked_node = NULL; |
| 1733 | INIT_LIST_HEAD(&p->rcu_node_entry); |
| 1734 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
| 1735 | #ifdef CONFIG_TASKS_RCU |
| 1736 | p->rcu_tasks_holdout = false; |
| 1737 | INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); |
| 1738 | p->rcu_tasks_idle_cpu = -1; |
| 1739 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1740 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 1741 | p->trc_reader_nesting = 0; |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1742 | p->trc_reader_special.s = 0; |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1743 | INIT_LIST_HEAD(&p->trc_holdout_list); |
| 1744 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
Ingo Molnar | 6bfbaa5 | 2017-02-03 21:37:49 +0100 | [diff] [blame] | 1745 | } |
| 1746 | |
Christian Brauner | 3695eae | 2019-07-28 00:22:29 +0200 | [diff] [blame] | 1747 | struct pid *pidfd_pid(const struct file *file) |
| 1748 | { |
| 1749 | if (file->f_op == &pidfd_fops) |
| 1750 | return file->private_data; |
| 1751 | |
| 1752 | return ERR_PTR(-EBADF); |
| 1753 | } |
| 1754 | |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1755 | static int pidfd_release(struct inode *inode, struct file *file) |
| 1756 | { |
| 1757 | struct pid *pid = file->private_data; |
| 1758 | |
| 1759 | file->private_data = NULL; |
| 1760 | put_pid(pid); |
| 1761 | return 0; |
| 1762 | } |
| 1763 | |
| 1764 | #ifdef CONFIG_PROC_FS |
Christian Kellner | 15d42eb | 2019-10-14 18:20:32 +0200 | [diff] [blame] | 1765 | /** |
| 1766 | * pidfd_show_fdinfo - print information about a pidfd |
| 1767 | * @m: proc fdinfo file |
| 1768 | * @f: file referencing a pidfd |
| 1769 | * |
| 1770 | * Pid: |
| 1771 | * This function will print the pid that a given pidfd refers to in the |
| 1772 | * pid namespace of the procfs instance. |
| 1773 | * If the pid namespace of the process is not a descendant of the pid |
| 1774 | * namespace of the procfs instance 0 will be shown as its pid. This is |
| 1775 | * similar to calling getppid() on a process whose parent is outside of |
| 1776 | * its pid namespace. |
| 1777 | * |
| 1778 | * NSpid: |
| 1779 | * If pid namespaces are supported then this function will also print |
| 1780 | * the pid that a given pidfd refers to for all descendant pid namespaces
| 1781 | * starting from the current pid namespace of the instance, i.e. the |
| 1782 | * Pid field and the first entry in the NSpid field will be identical. |
| 1783 | * If the pid namespace of the process is not a descendant of the pid |
| 1784 | * namespace of the procfs instance 0 will be shown as its first NSpid |
| 1785 | * entry and no others will be shown. |
| 1786 | * Note that this differs from the Pid and NSpid fields in |
| 1787 | * /proc/<pid>/status where Pid and NSpid are always shown relative to |
| 1788 | * the pid namespace of the procfs instance. The difference becomes |
| 1789 | * obvious when sending around a pidfd between pid namespaces from a |
Xiaofeng Cao | a8ca6b1 | 2021-05-06 18:04:28 -0700 | [diff] [blame] | 1790 | * different branch of the tree, i.e. where no ancestral relation is |
Christian Kellner | 15d42eb | 2019-10-14 18:20:32 +0200 | [diff] [blame] | 1791 | * present between the pid namespaces: |
| 1792 | * - create two new pid namespaces ns1 and ns2 in the initial pid |
| 1793 | * namespace (also take care to create new mount namespaces in the |
| 1794 | * new pid namespace and mount procfs) |
| 1795 | * - create a process with a pidfd in ns1 |
| 1796 | * - send pidfd from ns1 to ns2 |
| 1797 | * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid |
| 1798 | * have exactly one entry, which is 0 |
| 1799 | */ |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1800 | static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) |
| 1801 | { |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1802 | struct pid *pid = f->private_data; |
Christian Brauner | 3d6d8da | 2019-10-17 12:18:28 +0200 | [diff] [blame] | 1803 | struct pid_namespace *ns; |
| 1804 | pid_t nr = -1; |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1805 | |
Christian Brauner | 3d6d8da | 2019-10-17 12:18:28 +0200 | [diff] [blame] | 1806 | if (likely(pid_has_task(pid, PIDTYPE_PID))) { |
Alexey Gladkov | 9d78ede | 2020-05-18 20:07:38 +0200 | [diff] [blame] | 1807 | ns = proc_pid_ns(file_inode(m->file)->i_sb); |
Christian Brauner | 3d6d8da | 2019-10-17 12:18:28 +0200 | [diff] [blame] | 1808 | nr = pid_nr_ns(pid, ns); |
| 1809 | } |
| 1810 | |
| 1811 | seq_put_decimal_ll(m, "Pid:\t", nr); |
Christian Kellner | 15d42eb | 2019-10-14 18:20:32 +0200 | [diff] [blame] | 1812 | |
| 1813 | #ifdef CONFIG_PID_NS |
Christian Brauner | 3d6d8da | 2019-10-17 12:18:28 +0200 | [diff] [blame] | 1814 | seq_put_decimal_ll(m, "\nNSpid:\t", nr); |
| 1815 | if (nr > 0) { |
Christian Kellner | 15d42eb | 2019-10-14 18:20:32 +0200 | [diff] [blame] | 1816 | int i; |
| 1817 | |
| 1818 | /* If nr is non-zero it means that 'pid' is valid and that |
| 1819 | * ns, i.e. the pid namespace associated with the procfs |
| 1820 | * instance, is in the pid namespace hierarchy of pid. |
| 1821 | * Start at one below the already printed level. |
| 1822 | */ |
| 1823 | for (i = ns->level + 1; i <= pid->level; i++) |
Christian Brauner | 3d6d8da | 2019-10-17 12:18:28 +0200 | [diff] [blame] | 1824 | seq_put_decimal_ll(m, "\t", pid->numbers[i].nr); |
Christian Kellner | 15d42eb | 2019-10-14 18:20:32 +0200 | [diff] [blame] | 1825 | } |
| 1826 | #endif |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1827 | seq_putc(m, '\n'); |
| 1828 | } |
| 1829 | #endif |
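/*
 * Example fdinfo output (illustrative values) for a pidfd whose target
 * sits one pid namespace below the procfs instance:
 *
 *	Pid:	4366
 *	NSpid:	4366	2
 *
 * With no pid namespace nesting, NSpid has a single entry equal to Pid.
 */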
| 1830 | |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1831 | /* |
| 1832 | * Poll support for process exit notification. |
| 1833 | */ |
Luc Van Oostenryck | 9e77716 | 2019-11-20 01:33:20 +0100 | [diff] [blame] | 1834 | static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1835 | { |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1836 | struct pid *pid = file->private_data; |
Luc Van Oostenryck | 9e77716 | 2019-11-20 01:33:20 +0100 | [diff] [blame] | 1837 | __poll_t poll_flags = 0; |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1838 | |
| 1839 | poll_wait(file, &pid->wait_pidfd, pts); |
| 1840 | |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1841 | /* |
| 1842 | * Inform pollers only when the whole thread group exits. |
| 1843 | * If the thread group leader exits before all other threads in the |
| 1844 | * group, then poll(2) should block, similar to the wait(2) family. |
| 1845 | */ |
Eric W. Biederman | 38fd525a | 2020-07-01 07:30:06 -0500 | [diff] [blame] | 1846 | if (thread_group_exited(pid)) |
Luc Van Oostenryck | 9e77716 | 2019-11-20 01:33:20 +0100 | [diff] [blame] | 1847 | poll_flags = EPOLLIN | EPOLLRDNORM; |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1848 | |
| 1849 | return poll_flags; |
| 1850 | } |
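/*
 * Illustrative userspace use (not from the original file): waiting for
 * a process to exit without blocking in wait(2):
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once the thread group has exited
 */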
| 1851 | |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1852 | const struct file_operations pidfd_fops = { |
| 1853 | .release = pidfd_release, |
Joel Fernandes (Google) | b53b0b9 | 2019-04-30 12:21:53 -0400 | [diff] [blame] | 1854 | .poll = pidfd_poll, |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1855 | #ifdef CONFIG_PROC_FS |
| 1856 | .show_fdinfo = pidfd_show_fdinfo, |
| 1857 | #endif |
| 1858 | }; |
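/*
 * Userspace sketch of the exit notification pidfd_poll() implements
 * (assumes Linux 5.3+ for pidfd_open(2); error handling elided).
 * POLLIN only becomes ready once the whole thread group has exited,
 * matching the comment above.
 */
#include <poll.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {		/* child */
		sleep(1);
		_exit(0);
	}

	int pidfd = syscall(__NR_pidfd_open, pid, 0);
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* blocks until the child exits */
	return (pfd.revents & POLLIN) ? 0 : 1;
}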
| 1859 | |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 1860 | static void __delayed_free_task(struct rcu_head *rhp) |
| 1861 | { |
| 1862 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); |
| 1863 | |
| 1864 | free_task(tsk); |
| 1865 | } |
| 1866 | |
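/*
 * With CONFIG_MEMCG, get_mem_cgroup_from_mm() may still dereference a
 * stale mm->owner under rcu_read_lock(), so the fork failure path must
 * let an RCU grace period elapse before freeing the task_struct.
 */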
| 1867 | static __always_inline void delayed_free_task(struct task_struct *tsk) |
| 1868 | { |
| 1869 | if (IS_ENABLED(CONFIG_MEMCG)) |
| 1870 | call_rcu(&tsk->rcu, __delayed_free_task); |
| 1871 | else |
| 1872 | free_task(tsk); |
| 1873 | } |
| 1874 | |
Suren Baghdasaryan | 67197a4 | 2020-10-13 16:58:35 -0700 | [diff] [blame] | 1875 | static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) |
| 1876 | { |
| 1877 | /* Skip if kernel thread */ |
| 1878 | if (!tsk->mm) |
| 1879 | return; |
| 1880 | |
| 1881 | /* Skip if spawning a thread or using vfork */ |
| 1882 | if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) |
| 1883 | return; |
| 1884 | |
| 1885 | /* We need to synchronize with __set_oom_adj */ |
| 1886 | mutex_lock(&oom_adj_mutex); |
| 1887 | set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); |
| 1888 | /* Update the values in case they were changed after copy_signal */ |
| 1889 | tsk->signal->oom_score_adj = current->signal->oom_score_adj; |
| 1890 | tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; |
| 1891 | mutex_unlock(&oom_adj_mutex); |
| 1892 | } |
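/*
 * Userspace sketch (not from this file) of the invariant kept here and
 * in __set_oom_adj(): non-thread tasks that share an mm, e.g. created
 * with clone(CLONE_VM) alone, report one common oom_score_adj, so
 * writing /proc/<pid>/oom_score_adj for either task updates both.
 * The function names below are ours; error handling elided.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int vm_child(void *arg)
{
	pause();	/* keep the shared mm alive */
	return 0;
}

int main(void)
{
	char *stack = malloc(1 << 16);
	pid_t pid = clone(vm_child, stack + (1 << 16),
			  CLONE_VM | SIGCHLD, NULL);

	printf("writing /proc/self/oom_score_adj also adjusts %d\n", pid);
	return 0;
}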
| 1893 | |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1894 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1895 | * This creates a new process as a copy of the old one, |
| 1896 | * but does not actually start it yet. |
| 1897 | * |
| 1898 | * It copies the registers, and all the appropriate |
| 1899 | * parts of the process environment (as per the clone |
| 1900 | * flags). The actual kick-off is left to the caller. |
| 1901 | */ |
Emese Revfy | 0766f78 | 2016-06-20 20:42:34 +0200 | [diff] [blame] | 1902 | static __latent_entropy struct task_struct *copy_process( |
Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1903 | struct pid *pid, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 1904 | int trace, |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 1905 | int node, |
| 1906 | struct kernel_clone_args *args) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1907 | { |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1908 | int pidfd = -1, retval; |
Mariusz Kozlowski | a24efe6 | 2007-10-18 23:41:09 -0700 | [diff] [blame] | 1909 | struct task_struct *p; |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 1910 | struct multiprocess_signals delayed; |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 1911 | struct file *pidfile = NULL; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 1912 | u64 clone_flags = args->flags; |
Andrei Vagin | 769071a | 2019-11-12 01:26:52 +0000 | [diff] [blame] | 1913 | struct nsproxy *nsp = current->nsproxy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 | |
Marcos Paulo de Souza | 667b609 | 2018-02-06 15:39:34 -0800 | [diff] [blame] | 1915 | /* |
| 1916 | * Don't allow sharing the root directory with processes in a different |
| 1917 | * namespace |
| 1918 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) |
| 1920 | return ERR_PTR(-EINVAL); |
| 1921 | |
Eric W. Biederman | e66eded | 2013-03-13 11:51:49 -0700 | [diff] [blame] | 1922 | if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) |
| 1923 | return ERR_PTR(-EINVAL); |
| 1924 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1925 | /* |
| 1926 | * Thread groups must share signals as well, and detached threads |
| 1927 | * can only be started up within the thread group. |
| 1928 | */ |
| 1929 | if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) |
| 1930 | return ERR_PTR(-EINVAL); |
| 1931 | |
| 1932 | /* |
| 1933 | * Shared signal handlers imply shared VM. By way of the above, |
| 1934 | * thread groups also imply shared VM. Blocking this case allows |
| 1935 | * for various simplifications in other code. |
| 1936 | */ |
| 1937 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) |
| 1938 | return ERR_PTR(-EINVAL); |
| 1939 | |
Sukadev Bhattiprolu | 123be07 | 2009-09-23 15:57:20 -0700 | [diff] [blame] | 1940 | /* |
| 1941 | * Siblings of global init remain as zombies on exit since they are |
| 1942 | * not reaped by their parent (swapper). To solve this and to avoid |
| 1943 | * multi-rooted process trees, prevent global and container-inits |
| 1944 | * from creating siblings. |
| 1945 | */ |
| 1946 | if ((clone_flags & CLONE_PARENT) && |
| 1947 | current->signal->flags & SIGNAL_UNKILLABLE) |
| 1948 | return ERR_PTR(-EINVAL); |
| 1949 | |
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1950 | /* |
Oleg Nesterov | 40a0d32 | 2013-09-11 14:19:41 -0700 | [diff] [blame] | 1951 | * If the new process will be in a different pid or user namespace |
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 1952 | * do not allow it to share a thread group with the forking task. |
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1953 | */ |
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 1954 | if (clone_flags & CLONE_THREAD) { |
Oleg Nesterov | 40a0d32 | 2013-09-11 14:19:41 -0700 | [diff] [blame] | 1955 | if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || |
Andrei Vagin | 769071a | 2019-11-12 01:26:52 +0000 | [diff] [blame] | 1956 | (task_active_pid_ns(current) != nsp->pid_ns_for_children)) |
| 1957 | return ERR_PTR(-EINVAL); |
| 1958 | } |
| 1959 | |
| 1960 | /* |
| 1961 | * If the new process will be in a different time namespace |
| 1962 | * do not allow it to share VM or a thread group with the forking task. |
| 1963 | */ |
| 1964 | if (clone_flags & (CLONE_THREAD | CLONE_VM)) { |
| 1965 | if (nsp->time_ns != nsp->time_ns_for_children) |
Oleg Nesterov | 40a0d32 | 2013-09-11 14:19:41 -0700 | [diff] [blame] | 1966 | return ERR_PTR(-EINVAL); |
| 1967 | } |
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1968 | |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1969 | if (clone_flags & CLONE_PIDFD) { |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1970 | /* |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1971 | * - CLONE_DETACHED is blocked so that we can potentially |
| 1972 | * reuse it later for CLONE_PIDFD. |
| 1973 | * - CLONE_THREAD is blocked until someone really needs it. |
| 1974 | */ |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 1975 | if (clone_flags & (CLONE_DETACHED | CLONE_THREAD)) |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1976 | return ERR_PTR(-EINVAL); |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 1977 | } |
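	/*
	 * Hypothetical userspace view of the checks above (not from this
	 * file): clone3() with .flags = CLONE_THREAD and nothing else
	 * fails with EINVAL, since CLONE_THREAD requires CLONE_SIGHAND,
	 * which in turn requires CLONE_VM.
	 */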
| 1978 | |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 1979 | /* |
| 1980 | * Force any signals received before this point to be delivered |
| 1981 | * before the fork happens. Collect up signals sent to multiple |
| 1982 | * processes that happen during the fork and delay them so that |
| 1983 | * they appear to happen after the fork. |
| 1984 | */ |
| 1985 | sigemptyset(&delayed.signal); |
| 1986 | INIT_HLIST_NODE(&delayed.node); |
| 1987 | |
| 1988 | spin_lock_irq(¤t->sighand->siglock); |
| 1989 | if (!(clone_flags & CLONE_THREAD)) |
| 1990 | hlist_add_head(&delayed.node, ¤t->signal->multiprocess); |
| 1991 | recalc_sigpending(); |
| 1992 | spin_unlock_irq(¤t->sighand->siglock); |
| 1993 | retval = -ERESTARTNOINTR; |
Jens Axboe | 66ae0d1 | 2021-03-22 09:39:12 -0600 | [diff] [blame] | 1994 | if (task_sigpending(current)) |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 1995 | goto fork_out; |
| 1996 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | retval = -ENOMEM; |
Andi Kleen | 725fc62 | 2016-05-23 16:24:05 -0700 | [diff] [blame] | 1998 | p = dup_task_struct(current, node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | if (!p) |
| 2000 | goto fork_out; |
Jens Axboe | b16b385 | 2021-03-26 09:05:22 -0600 | [diff] [blame] | 2001 | if (args->io_thread) { |
| 2002 | /* |
| 2003 | * Mark us an IO worker, and block any signal that isn't |
| 2004 | * fatal or STOP |
| 2005 | */ |
Jens Axboe | cc440e8 | 2021-03-04 12:21:05 -0700 | [diff] [blame] | 2006 | p->flags |= PF_IO_WORKER; |
Jens Axboe | b16b385 | 2021-03-26 09:05:22 -0600 | [diff] [blame] | 2007 | siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 2008 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2010 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; |
Vegard Nossum | 4d6501d | 2017-05-09 09:39:59 +0200 | [diff] [blame] | 2011 | /* |
| 2012 | * Clear TID on mm_release()? |
| 2013 | */ |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2014 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; |
Vegard Nossum | 4d6501d | 2017-05-09 09:39:59 +0200 | [diff] [blame] | 2015 | |
Steven Rostedt | f7e8b61 | 2009-06-02 16:39:48 -0400 | [diff] [blame] | 2016 | ftrace_graph_init_task(p); |
| 2017 | |
Peter Zijlstra | bea493a | 2006-10-17 00:10:33 -0700 | [diff] [blame] | 2018 | rt_mutex_init_task(p); |
| 2019 | |
Peter Zijlstra | a21ee60 | 2020-05-25 12:22:41 +0200 | [diff] [blame] | 2020 | lockdep_assert_irqs_enabled(); |
Ingo Molnar | d12c1a3 | 2008-07-14 12:09:28 +0200 | [diff] [blame] | 2021 | #ifdef CONFIG_PROVE_LOCKING |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 2022 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
| 2023 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2024 | retval = -EAGAIN; |
Alexey Gladkov | 21d1c5e | 2021-04-22 14:27:11 +0200 | [diff] [blame] | 2025 | if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { |
Eric Paris | b57922b | 2013-07-03 15:08:29 -0700 | [diff] [blame] | 2026 | if (p->real_cred->user != INIT_USER && |
| 2027 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | goto bad_fork_free; |
| 2029 | } |
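	/*
	 * Userspace observes the check above as fork()/clone() failing
	 * with EAGAIN once the real user owns rlimit(RLIMIT_NPROC)
	 * processes; e.g. after setrlimit(RLIMIT_NPROC,
	 * &(struct rlimit){ 1, 1 }), the next fork() from an
	 * unprivileged caller fails with EAGAIN.
	 */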
Vasiliy Kulikov | 72fa599 | 2011-08-08 19:02:04 +0400 | [diff] [blame] | 2030 | current->flags &= ~PF_NPROC_EXCEEDED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2031 | |
David Howells | f1752ee | 2008-11-14 10:39:17 +1100 | [diff] [blame] | 2032 | retval = copy_creds(p, clone_flags); |
| 2033 | if (retval < 0) |
| 2034 | goto bad_fork_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | |
| 2036 | /* |
| 2037 | * If multiple threads are within copy_process(), then this check |
| 2038 | * triggers too late. This doesn't hurt; the check is only there
| 2039 | * to stop root fork bombs. |
| 2040 | */ |
Li Zefan | 04ec93f | 2009-02-06 08:17:19 +0000 | [diff] [blame] | 2041 | retval = -EAGAIN; |
Weilong Chen | c17d1a3 | 2020-06-23 12:12:40 +0800 | [diff] [blame] | 2042 | if (data_race(nr_threads >= max_threads)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2043 | goto bad_fork_cleanup_count; |
| 2044 | |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 2045 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
Frederic Weisbecker | a8ea6fc | 2021-05-26 01:58:49 +0200 | [diff] [blame] | 2046 | p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); |
David Rientjes | 514ddb4 | 2014-04-07 15:37:27 -0700 | [diff] [blame] | 2047 | p->flags |= PF_FORKNOEXEC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2048 | INIT_LIST_HEAD(&p->children); |
| 2049 | INIT_LIST_HEAD(&p->sibling); |
Paul E. McKenney | f41d911 | 2009-08-22 13:56:52 -0700 | [diff] [blame] | 2050 | rcu_copy_process(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2051 | p->vfork_done = NULL; |
| 2052 | spin_lock_init(&p->alloc_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2053 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2054 | init_sigpending(&p->pending); |
| 2055 | |
Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 2056 | p->utime = p->stime = p->gtime = 0; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 2057 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME |
Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 2058 | p->utimescaled = p->stimescaled = 0; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 2059 | #endif |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 2060 | prev_cputime_init(&p->prev_cputime); |
| 2061 | |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 2062 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
Frederic Weisbecker | bac5b6b | 2017-06-29 19:15:10 +0200 | [diff] [blame] | 2063 | seqcount_init(&p->vtime.seqcount); |
| 2064 | p->vtime.starttime = 0; |
| 2065 | p->vtime.state = VTIME_INACTIVE; |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 2066 | #endif |
| 2067 | |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 2068 | #ifdef CONFIG_IO_URING |
| 2069 | p->io_uring = NULL; |
| 2070 | #endif |
| 2071 | |
KAMEZAWA Hiroyuki | a3a2e76 | 2010-04-06 14:34:42 -0700 | [diff] [blame] | 2072 | #if defined(SPLIT_RSS_COUNTING) |
| 2073 | memset(&p->rss_stat, 0, sizeof(p->rss_stat)); |
| 2074 | #endif |
Balbir Singh | 172ba84 | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 2075 | |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 2076 | p->default_timer_slack_ns = current->timer_slack_ns; |
| 2077 | |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 2078 | #ifdef CONFIG_PSI |
| 2079 | p->psi_flags = 0; |
| 2080 | #endif |
| 2081 | |
Andrea Righi | 5995477 | 2008-07-27 17:29:15 +0200 | [diff] [blame] | 2082 | task_io_accounting_init(&p->ioac); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | acct_clear_integrals(p); |
| 2084 | |
Thomas Gleixner | 3a245c0 | 2019-08-21 21:09:06 +0200 | [diff] [blame] | 2085 | posix_cputimers_init(&p->posix_cputimers); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2086 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2087 | p->io_context = NULL; |
Richard Guy Briggs | c0b0ae8 | 2018-05-12 21:58:21 -0400 | [diff] [blame] | 2088 | audit_set_context(p, NULL); |
Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 2089 | cgroup_fork(p); |
Eric W. Biederman | 40966e3 | 2021-12-02 09:56:14 -0600 | [diff] [blame] | 2090 | if (p->flags & PF_KTHREAD) { |
| 2091 | if (!set_kthread_struct(p)) |
Eric W. Biederman | ff8288f | 2021-12-20 10:42:18 -0600 | [diff] [blame] | 2092 | goto bad_fork_cleanup_delayacct; |
Eric W. Biederman | 40966e3 | 2021-12-02 09:56:14 -0600 | [diff] [blame] | 2093 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | #ifdef CONFIG_NUMA |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 2095 | p->mempolicy = mpol_dup(p->mempolicy); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2096 | if (IS_ERR(p->mempolicy)) { |
| 2097 | retval = PTR_ERR(p->mempolicy); |
| 2098 | p->mempolicy = NULL; |
Eric W. Biederman | ff8288f | 2021-12-20 10:42:18 -0600 | [diff] [blame] | 2099 | goto bad_fork_cleanup_delayacct; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2100 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2101 | #endif |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 2102 | #ifdef CONFIG_CPUSETS |
| 2103 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; |
| 2104 | p->cpuset_slab_spread_rotor = NUMA_NO_NODE; |
Ahmed S. Darwish | b750586 | 2020-07-20 17:55:19 +0200 | [diff] [blame] | 2105 | seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 2106 | #endif |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 2107 | #ifdef CONFIG_TRACE_IRQFLAGS |
Marco Elver | 0584df9 | 2020-07-29 13:09:15 +0200 | [diff] [blame] | 2108 | memset(&p->irqtrace, 0, sizeof(p->irqtrace)); |
| 2109 | p->irqtrace.hardirq_disable_ip = _THIS_IP_; |
| 2110 | p->irqtrace.softirq_enable_ip = _THIS_IP_; |
| 2111 | p->softirqs_enabled = 1; |
| 2112 | p->softirq_context = 0; |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 2113 | #endif |
David Hildenbrand | 8bcbde5 | 2015-05-11 17:52:06 +0200 | [diff] [blame] | 2114 | |
| 2115 | p->pagefault_disabled = 0; |
| 2116 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2117 | #ifdef CONFIG_LOCKDEP |
Byungchul Park | b09be67 | 2017-08-07 16:12:52 +0900 | [diff] [blame] | 2118 | lockdep_init_task(p); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2119 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2120 | |
Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 2121 | #ifdef CONFIG_DEBUG_MUTEXES |
| 2122 | p->blocked_on = NULL; /* not blocked yet */ |
| 2123 | #endif |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 2124 | #ifdef CONFIG_BCACHE |
| 2125 | p->sequential_io = 0; |
| 2126 | p->sequential_io_avg = 0; |
| 2127 | #endif |
Song Liu | a10787e | 2021-02-25 15:43:14 -0800 | [diff] [blame] | 2128 | #ifdef CONFIG_BPF_SYSCALL |
| 2129 | RCU_INIT_POINTER(p->bpf_storage, NULL); |
Andrii Nakryiko | c7603cf | 2021-07-12 16:06:15 -0700 | [diff] [blame] | 2130 | p->bpf_ctx = NULL; |
Song Liu | a10787e | 2021-02-25 15:43:14 -0800 | [diff] [blame] | 2131 | #endif |
Markus Metzger | 0f48140 | 2009-04-03 16:43:48 +0200 | [diff] [blame] | 2132 | |
Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 2133 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 2134 | retval = sched_fork(clone_flags, p); |
| 2135 | if (retval) |
| 2136 | goto bad_fork_cleanup_policy; |
Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 2137 | |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 2138 | retval = perf_event_init_task(p, clone_flags); |
Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 2139 | if (retval) |
| 2140 | goto bad_fork_cleanup_policy; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2141 | retval = audit_alloc(p); |
| 2142 | if (retval) |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 2143 | goto bad_fork_cleanup_perf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | /* copy all the process information */ |
Jack Miller | ab602f7 | 2014-08-08 14:23:19 -0700 | [diff] [blame] | 2145 | shm_init_task(p); |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 2146 | retval = security_task_alloc(p, clone_flags); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2147 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2148 | goto bad_fork_cleanup_audit; |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 2149 | retval = copy_semundo(clone_flags, p); |
| 2150 | if (retval) |
| 2151 | goto bad_fork_cleanup_security; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2152 | retval = copy_files(clone_flags, p); |
| 2153 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2154 | goto bad_fork_cleanup_semundo; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2155 | retval = copy_fs(clone_flags, p); |
| 2156 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2157 | goto bad_fork_cleanup_files; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2158 | retval = copy_sighand(clone_flags, p); |
| 2159 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2160 | goto bad_fork_cleanup_fs; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2161 | retval = copy_signal(clone_flags, p); |
| 2162 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2163 | goto bad_fork_cleanup_sighand; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2164 | retval = copy_mm(clone_flags, p); |
| 2165 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2166 | goto bad_fork_cleanup_signal; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2167 | retval = copy_namespaces(clone_flags, p); |
| 2168 | if (retval) |
David Howells | d84f4f9 | 2008-11-14 10:39:23 +1100 | [diff] [blame] | 2169 | goto bad_fork_cleanup_mm; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2170 | retval = copy_io(clone_flags, p); |
| 2171 | if (retval) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 2172 | goto bad_fork_cleanup_namespaces; |
Christian Brauner | 714acdb | 2020-06-11 11:04:15 +0200 | [diff] [blame] | 2173 | retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2174 | if (retval) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 2175 | goto bad_fork_cleanup_io; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2176 | |
Alexander Popov | afaef01 | 2018-08-17 01:16:58 +0300 | [diff] [blame] | 2177 | stackleak_task_init(p); |
| 2178 | |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 2179 | if (pid != &init_struct_pid) { |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2180 | pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, |
| 2181 | args->set_tid_size); |
Michal Hocko | 35f71bc | 2015-04-16 12:47:38 -0700 | [diff] [blame] | 2182 | if (IS_ERR(pid)) { |
| 2183 | retval = PTR_ERR(pid); |
Jiri Slaby | 0740aa5 | 2016-05-20 17:00:25 -0700 | [diff] [blame] | 2184 | goto bad_fork_cleanup_thread; |
Michal Hocko | 35f71bc | 2015-04-16 12:47:38 -0700 | [diff] [blame] | 2185 | } |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 2186 | } |
| 2187 | |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 2188 | /* |
| 2189 | * This has to happen after we've potentially unshared the file |
| 2190 | * descriptor table (so that the pidfd doesn't leak into the child |
| 2191 | * if the fd table isn't shared). |
| 2192 | */ |
| 2193 | if (clone_flags & CLONE_PIDFD) { |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 2194 | retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 2195 | if (retval < 0) |
| 2196 | goto bad_fork_free_pid; |
| 2197 | |
| 2198 | pidfd = retval; |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 2199 | |
| 2200 | pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, |
| 2201 | O_RDWR | O_CLOEXEC); |
| 2202 | if (IS_ERR(pidfile)) { |
| 2203 | put_unused_fd(pidfd); |
Christian Brauner | 28dd29c | 2019-07-01 16:01:46 +0200 | [diff] [blame] | 2204 | retval = PTR_ERR(pidfile); |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 2205 | goto bad_fork_free_pid; |
| 2206 | } |
| 2207 | get_pid(pid); /* held by pidfile now */ |
| 2208 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2209 | retval = put_user(pidfd, args->pidfd); |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 2210 | if (retval) |
| 2211 | goto bad_fork_put_pidfd; |
| 2212 | } |
| 2213 | |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2214 | #ifdef CONFIG_BLOCK |
| 2215 | p->plug = NULL; |
| 2216 | #endif |
Thomas Gleixner | ba31c1a4 | 2019-11-06 22:55:36 +0100 | [diff] [blame] | 2217 | futex_init_task(p); |
| 2218 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | /* |
GOTO Masanori | f9a3879 | 2006-03-13 21:20:44 -0800 | [diff] [blame] | 2220 | * sigaltstack should be cleared when sharing the same VM |
| 2221 | */ |
| 2222 | if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) |
Stas Sergeev | 2a74213 | 2016-04-14 23:20:04 +0300 | [diff] [blame] | 2223 | sas_ss_reset(p); |
GOTO Masanori | f9a3879 | 2006-03-13 21:20:44 -0800 | [diff] [blame] | 2224 | |
| 2225 | /* |
Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 2226 | * Syscall tracing and stepping should be turned off in the |
| 2227 | * child regardless of CLONE_PTRACE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | */ |
Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 2229 | user_disable_single_step(p); |
Gabriel Krisman Bertazi | 64c19ba | 2020-11-16 12:42:02 -0500 | [diff] [blame] | 2230 | clear_task_syscall_work(p, SYSCALL_TRACE); |
Gabriel Krisman Bertazi | 64eb35f | 2020-11-16 12:42:03 -0500 | [diff] [blame] | 2231 | #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) |
| 2232 | clear_task_syscall_work(p, SYSCALL_EMU); |
Laurent Vivier | ed75e8d | 2005-09-03 15:57:18 -0700 | [diff] [blame] | 2233 | #endif |
Lin Feng | e02c9b0 | 2019-05-14 15:42:34 -0700 | [diff] [blame] | 2234 | clear_tsk_latency_tracing(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2235 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2236 | /* ok, now we should be set up... */
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 2237 | p->pid = pid_nr(pid); |
| 2238 | if (clone_flags & CLONE_THREAD) { |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 2239 | p->group_leader = current->group_leader; |
| 2240 | p->tgid = current->tgid; |
| 2241 | } else { |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 2242 | p->group_leader = p; |
| 2243 | p->tgid = p->pid; |
| 2244 | } |
Oleg Nesterov | 5f8aadd | 2012-03-14 19:55:38 +0100 | [diff] [blame] | 2245 | |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 2246 | p->nr_dirtied = 0; |
| 2247 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); |
Wu Fengguang | 8371235 | 2011-06-11 19:25:42 -0600 | [diff] [blame] | 2248 | p->dirty_paused_when = 0; |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 2249 | |
Oleg Nesterov | bb8cbbf | 2013-11-13 15:36:12 +0100 | [diff] [blame] | 2250 | p->pdeath_signal = 0; |
Oleg Nesterov | 47e6532 | 2006-03-28 16:11:25 -0800 | [diff] [blame] | 2251 | INIT_LIST_HEAD(&p->thread_group); |
Al Viro | 158e164 | 2012-06-27 09:24:13 +0400 | [diff] [blame] | 2252 | p->task_works = NULL; |
Michael Pratt | ca7752c | 2021-11-01 17:06:15 -0400 | [diff] [blame] | 2253 | clear_posix_cputimers_work(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | |
Peter Zijlstra | d741bf4 | 2020-08-29 22:03:24 +0900 | [diff] [blame] | 2255 | #ifdef CONFIG_KRETPROBES |
| 2256 | p->kretprobe_instances.first = NULL; |
| 2257 | #endif |
| 2258 | |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 2259 | /* |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 2260 | * Ensure that the cgroup subsystem policies allow the new process to be |
Randy Dunlap | 7b7b8a2 | 2020-10-15 20:10:28 -0700 | [diff] [blame] | 2261 | * forked. It should be noted that the new process's css_set can be changed |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 2262 | * between here and cgroup_post_fork() if an organisation operation is in |
| 2263 | * progress. |
| 2264 | */ |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2265 | retval = cgroup_can_fork(p, args); |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 2266 | if (retval) |
Christian Brauner | 5a5cf5c | 2020-02-05 14:26:20 +0100 | [diff] [blame] | 2267 | goto bad_fork_put_pidfd; |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 2268 | |
| 2269 | /* |
David Herrmann | 7b55851 | 2019-01-08 13:58:52 +0100 | [diff] [blame] | 2270 | * From this point on we must avoid any synchronous user-space |
| 2271 | * communication until we take the tasklist-lock. In particular, we do |
| 2272 | * not want user-space to be able to predict the process start-time by |
| 2273 | * stalling fork(2) after we recorded the start_time but before it is |
| 2274 | * visible to the system. |
| 2275 | */ |
| 2276 | |
| 2277 | p->start_time = ktime_get_ns(); |
Peter Zijlstra | cf25e24 | 2019-11-07 11:07:58 +0100 | [diff] [blame] | 2278 | p->start_boottime = ktime_get_boottime_ns(); |
David Herrmann | 7b55851 | 2019-01-08 13:58:52 +0100 | [diff] [blame] | 2279 | |
| 2280 | /* |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 2281 | * Make it visible to the rest of the system, but don't wake it up yet.
| 2282 | * Need tasklist lock for parent etc. handling!
| 2283 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2284 | write_lock_irq(&tasklist_lock); |
| 2285 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2286 | /* CLONE_PARENT re-uses the old parent */ |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 2287 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2288 | p->real_parent = current->real_parent; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 2289 | p->parent_exec_id = current->parent_exec_id; |
Eddy Wu | b4e0044 | 2020-11-07 14:47:22 +0800 | [diff] [blame] | 2290 | if (clone_flags & CLONE_THREAD) |
| 2291 | p->exit_signal = -1; |
| 2292 | else |
| 2293 | p->exit_signal = current->group_leader->exit_signal; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 2294 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2295 | p->real_parent = current; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 2296 | p->parent_exec_id = current->self_exec_id; |
Eddy Wu | b4e0044 | 2020-11-07 14:47:22 +0800 | [diff] [blame] | 2297 | p->exit_signal = args->exit_signal; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 2298 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2299 | |
Josh Poimboeuf | d83a7cb | 2017-02-13 19:42:40 -0600 | [diff] [blame] | 2300 | klp_copy_process(p); |
| 2301 | |
Peter Zijlstra | 85dd3f6 | 2021-03-29 15:18:35 +0200 | [diff] [blame] | 2302 | sched_core_fork(p); |
| 2303 | |
Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 2304 | spin_lock(¤t->sighand->siglock); |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 2305 | |
| 2306 | /* |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 2307 | * Copy seccomp details explicitly here, in case they were changed |
| 2308 | * before holding sighand lock. |
| 2309 | */ |
| 2310 | copy_seccomp(p); |
| 2311 | |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 2312 | rseq_fork(p, clone_flags); |
| 2313 | |
Eric W. Biederman | 4ca1d3e | 2018-07-13 15:30:33 -0500 | [diff] [blame] | 2314 | /* Don't start children in a dying pid namespace */ |
Gargi Sharma | e8cfbc2 | 2017-11-17 15:30:34 -0800 | [diff] [blame] | 2315 | if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { |
Kirill Tkhai | 3fd3722 | 2017-05-12 19:11:31 +0300 | [diff] [blame] | 2316 | retval = -ENOMEM; |
| 2317 | goto bad_fork_cancel_cgroup; |
| 2318 | } |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 2319 | |
Eric W. Biederman | 7673bf5 | 2018-07-23 08:01:10 -0500 | [diff] [blame] | 2320 | /* Let kill terminate clone/fork in the middle */ |
| 2321 | if (fatal_signal_pending(current)) { |
| 2322 | retval = -EINTR; |
| 2323 | goto bad_fork_cancel_cgroup; |
| 2324 | } |
| 2325 | |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 2326 | /* past the last point of failure */ |
| 2327 | if (pidfile) |
| 2328 | fd_install(pidfd, pidfile); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2329 | |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 2330 | init_task_pid_links(p); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2331 | if (likely(p->pid)) { |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2332 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 | |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 2334 | init_task_pid(p, PIDTYPE_PID, pid); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2335 | if (thread_group_leader(p)) { |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 2336 | init_task_pid(p, PIDTYPE_TGID, pid); |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 2337 | init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); |
| 2338 | init_task_pid(p, PIDTYPE_SID, task_session(current)); |
| 2339 | |
Eric W. Biederman | 1c4042c | 2010-07-12 17:10:36 -0700 | [diff] [blame] | 2340 | if (is_child_reaper(pid)) { |
Eric W. Biederman | 17cf22c | 2010-03-02 14:51:53 -0800 | [diff] [blame] | 2341 | ns_of_pid(pid)->child_reaper = p; |
Eric W. Biederman | 1c4042c | 2010-07-12 17:10:36 -0700 | [diff] [blame] | 2342 | p->signal->flags |= SIGNAL_UNKILLABLE; |
| 2343 | } |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 2344 | p->signal->shared_pending.signal = delayed.signal; |
Alan Cox | 9c9f4de | 2008-10-13 10:37:26 +0100 | [diff] [blame] | 2345 | p->signal->tty = tty_kref_get(current->signal->tty); |
Pavel Tikhomirov | 749860c | 2017-01-30 18:06:12 +0300 | [diff] [blame] | 2346 | /* |
| 2347 | * Inherit has_child_subreaper flag under the same |
| 2348 | * tasklist_lock with adding child to the process tree |
| 2349 | * for propagate_has_child_subreaper optimization. |
| 2350 | */ |
| 2351 | p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || |
| 2352 | p->real_parent->signal->is_child_subreaper; |
Oleg Nesterov | 9cd80bb | 2009-12-17 15:27:15 -0800 | [diff] [blame] | 2353 | list_add_tail(&p->sibling, &p->real_parent->children); |
Eric W. Biederman | 5e85d4a | 2006-04-18 22:20:16 -0700 | [diff] [blame] | 2354 | list_add_tail_rcu(&p->tasks, &init_task.tasks); |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 2355 | attach_pid(p, PIDTYPE_TGID); |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 2356 | attach_pid(p, PIDTYPE_PGID); |
| 2357 | attach_pid(p, PIDTYPE_SID); |
Christoph Lameter | 909ea96 | 2010-12-08 16:22:55 +0100 | [diff] [blame] | 2358 | __this_cpu_inc(process_counts); |
Oleg Nesterov | 80628ca | 2013-07-03 15:08:30 -0700 | [diff] [blame] | 2359 | } else { |
| 2360 | current->signal->nr_threads++; |
| 2361 | atomic_inc(¤t->signal->live); |
Elena Reshetova | 60d4de3 | 2019-01-18 14:27:27 +0200 | [diff] [blame] | 2362 | refcount_inc(¤t->signal->sigcnt); |
Eric W. Biederman | 924de3b | 2018-07-23 13:38:00 -0500 | [diff] [blame] | 2363 | task_join_group_stop(p); |
Oleg Nesterov | 80628ca | 2013-07-03 15:08:30 -0700 | [diff] [blame] | 2364 | list_add_tail_rcu(&p->thread_group, |
| 2365 | &p->group_leader->thread_group); |
Oleg Nesterov | 0c740d0 | 2014-01-21 15:49:56 -0800 | [diff] [blame] | 2366 | list_add_tail_rcu(&p->thread_node, |
| 2367 | &p->signal->thread_head); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2368 | } |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 2369 | attach_pid(p, PIDTYPE_PID); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2370 | nr_threads++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2371 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2372 | total_forks++; |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 2373 | hlist_del_init(&delayed.node); |
Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 2374 | spin_unlock(¤t->sighand->siglock); |
Oleg Nesterov | 4af4206 | 2014-04-13 20:58:54 +0200 | [diff] [blame] | 2375 | syscall_tracepoint_update(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | write_unlock_irq(&tasklist_lock); |
Oleg Nesterov | 4af4206 | 2014-04-13 20:58:54 +0200 | [diff] [blame] | 2377 | |
Andrew Morton | c13cf85 | 2005-11-28 13:43:48 -0800 | [diff] [blame] | 2378 | proc_fork_connector(p); |
Zhang Qiao | 4ef0c5c | 2021-09-15 14:40:30 +0800 | [diff] [blame] | 2379 | sched_post_fork(p, args); |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2380 | cgroup_post_fork(p, args); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2381 | perf_event_fork(p); |
KAMEZAWA Hiroyuki | 43d2b11 | 2012-01-10 15:08:09 -0800 | [diff] [blame] | 2382 | |
| 2383 | trace_task_newtask(p, clone_flags); |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 2384 | uprobe_copy_process(p, clone_flags); |
KAMEZAWA Hiroyuki | 43d2b11 | 2012-01-10 15:08:09 -0800 | [diff] [blame] | 2385 | |
Suren Baghdasaryan | 67197a4 | 2020-10-13 16:58:35 -0700 | [diff] [blame] | 2386 | copy_oom_score_adj(clone_flags, p); |
| 2387 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2388 | return p; |
| 2389 | |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 2390 | bad_fork_cancel_cgroup: |
Peter Zijlstra | 85dd3f6 | 2021-03-29 15:18:35 +0200 | [diff] [blame] | 2391 | sched_core_free(p); |
Kirill Tkhai | 3fd3722 | 2017-05-12 19:11:31 +0300 | [diff] [blame] | 2392 | spin_unlock(¤t->sighand->siglock); |
| 2393 | write_unlock_irq(&tasklist_lock); |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2394 | cgroup_cancel_fork(p, args); |
Christian Brauner | b3e58382 | 2019-03-27 13:04:15 +0100 | [diff] [blame] | 2395 | bad_fork_put_pidfd: |
Al Viro | 6fd2fe4 | 2019-06-26 22:22:09 -0400 | [diff] [blame] | 2396 | if (clone_flags & CLONE_PIDFD) { |
| 2397 | fput(pidfile); |
| 2398 | put_unused_fd(pidfd); |
| 2399 | } |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 2400 | bad_fork_free_pid: |
| 2401 | if (pid != &init_struct_pid) |
| 2402 | free_pid(pid); |
Jiri Slaby | 0740aa5 | 2016-05-20 17:00:25 -0700 | [diff] [blame] | 2403 | bad_fork_cleanup_thread: |
| 2404 | exit_thread(p); |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 2405 | bad_fork_cleanup_io: |
Louis Rilling | b69f229 | 2009-12-04 14:52:42 +0100 | [diff] [blame] | 2406 | if (p->io_context) |
| 2407 | exit_io_context(p); |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 2408 | bad_fork_cleanup_namespaces: |
Linus Torvalds | 444f378 | 2007-01-30 13:35:18 -0800 | [diff] [blame] | 2409 | exit_task_namespaces(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2410 | bad_fork_cleanup_mm: |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 2411 | if (p->mm) { |
| 2412 | mm_clear_owner(p->mm, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2413 | mmput(p->mm); |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 2414 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2415 | bad_fork_cleanup_signal: |
Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 2416 | if (!(clone_flags & CLONE_THREAD)) |
Mike Galbraith | 1c5354d | 2011-01-05 11:16:04 +0100 | [diff] [blame] | 2417 | free_signal_struct(p->signal); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2418 | bad_fork_cleanup_sighand: |
Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 2419 | __cleanup_sighand(p->sighand); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | bad_fork_cleanup_fs: |
| 2421 | exit_fs(p); /* blocking */ |
| 2422 | bad_fork_cleanup_files: |
| 2423 | exit_files(p); /* blocking */ |
| 2424 | bad_fork_cleanup_semundo: |
| 2425 | exit_sem(p); |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 2426 | bad_fork_cleanup_security: |
| 2427 | security_task_free(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2428 | bad_fork_cleanup_audit: |
| 2429 | audit_free(p); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 2430 | bad_fork_cleanup_perf: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2431 | perf_event_free_task(p); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 2432 | bad_fork_cleanup_policy: |
Byungchul Park | b09be67 | 2017-08-07 16:12:52 +0900 | [diff] [blame] | 2433 | lockdep_free_task(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2434 | #ifdef CONFIG_NUMA |
Lee Schermerhorn | f0be3d3 | 2008-04-28 02:13:08 -0700 | [diff] [blame] | 2435 | mpol_put(p->mempolicy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2436 | #endif |
Eric W. Biederman | ff8288f | 2021-12-20 10:42:18 -0600 | [diff] [blame] | 2437 | bad_fork_cleanup_delayacct: |
Shailabh Nagar | 35df17c | 2006-08-31 21:27:38 -0700 | [diff] [blame] | 2438 | delayacct_tsk_free(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2439 | bad_fork_cleanup_count: |
Alexey Gladkov | 21d1c5e | 2021-04-22 14:27:11 +0200 | [diff] [blame] | 2440 | dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); |
David Howells | e0e8173 | 2009-09-02 09:13:40 +0100 | [diff] [blame] | 2441 | exit_creds(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2442 | bad_fork_free: |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 2443 | WRITE_ONCE(p->__state, TASK_DEAD); |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 2444 | put_task_stack(p); |
Andrea Arcangeli | c3f3ce0 | 2019-05-14 15:40:46 -0700 | [diff] [blame] | 2445 | delayed_free_task(p); |
Oleg Nesterov | fe7d37d | 2006-01-08 01:04:02 -0800 | [diff] [blame] | 2446 | fork_out: |
Eric W. Biederman | c3ad2c3 | 2018-07-23 15:20:37 -0500 | [diff] [blame] | 2447 | spin_lock_irq(¤t->sighand->siglock); |
| 2448 | hlist_del_init(&delayed.node); |
| 2449 | spin_unlock_irq(¤t->sighand->siglock); |
Oleg Nesterov | fe7d37d | 2006-01-08 01:04:02 -0800 | [diff] [blame] | 2450 | return ERR_PTR(retval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2451 | } |
| 2452 | |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 2453 | static inline void init_idle_pids(struct task_struct *idle) |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2454 | { |
| 2455 | enum pid_type type; |
| 2456 | |
| 2457 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 2458 | INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ |
| 2459 | init_task_pid(idle, type, &init_struct_pid); |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2460 | } |
| 2461 | } |
| 2462 | |
Valentin Schneider | f1a0a37 | 2021-05-12 10:46:36 +0100 | [diff] [blame] | 2463 | struct task_struct * __init fork_idle(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2465 | struct task_struct *task; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2466 | struct kernel_clone_args args = { |
| 2467 | .flags = CLONE_VM, |
| 2468 | }; |
| 2469 | |
| 2470 | task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2471 | if (!IS_ERR(task)) { |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 2472 | init_idle_pids(task); |
Akinobu Mita | 753ca4f | 2006-11-25 11:09:34 -0800 | [diff] [blame] | 2473 | init_idle(task, cpu); |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2474 | } |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2475 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2476 | return task; |
| 2477 | } |
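/*
 * Example caller: the SMP boot code creates the per-CPU idle tasks this
 * way; idle_init() in kernel/smpboot.c calls fork_idle(cpu) for each
 * secondary CPU before it is brought online.
 */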
| 2478 | |
Nadav Amit | 13585fa | 2019-04-25 17:11:25 -0700 | [diff] [blame] | 2479 | struct mm_struct *copy_init_mm(void) |
| 2480 | { |
| 2481 | return dup_mm(NULL, &init_mm); |
| 2482 | } |
| 2483 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | /* |
Jens Axboe | cc440e8 | 2021-03-04 12:21:05 -0700 | [diff] [blame] | 2485 | * This is like kernel_clone(), but shaved down and tailored to just |
| 2486 | * creating io_uring workers. It returns a created task, or an error pointer. |
| 2487 | * The returned task is inactive, and the caller must fire it up through |
| 2488 | * wake_up_new_task(p). All signals are blocked in the created task. |
| 2489 | */ |
| 2490 | struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) |
| 2491 | { |
| 2492 | unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| |
| 2493 | CLONE_IO; |
| 2494 | struct kernel_clone_args args = { |
| 2495 | .flags = ((lower_32_bits(flags) | CLONE_VM | |
| 2496 | CLONE_UNTRACED) & ~CSIGNAL), |
| 2497 | .exit_signal = (lower_32_bits(flags) & CSIGNAL), |
| 2498 | .stack = (unsigned long)fn, |
| 2499 | .stack_size = (unsigned long)arg, |
| 2500 | .io_thread = 1, |
| 2501 | }; |
Jens Axboe | cc440e8 | 2021-03-04 12:21:05 -0700 | [diff] [blame] | 2502 | |
Jens Axboe | b16b385 | 2021-03-26 09:05:22 -0600 | [diff] [blame] | 2503 | return copy_process(NULL, 0, node, &args); |
Jens Axboe | cc440e8 | 2021-03-04 12:21:05 -0700 | [diff] [blame] | 2504 | } |
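/*
 * Sketch of the calling convention documented above (modelled on the
 * io_uring workers; the function names here are ours, not in-tree):
 */
static int io_worker_fn(void *data)
{
	/* process queued IO work until asked to stop */
	return 0;
}

static struct task_struct *start_io_worker(void *data, int node)
{
	struct task_struct *tsk;

	tsk = create_io_thread(io_worker_fn, data, node);
	if (!IS_ERR(tsk))
		wake_up_new_task(tsk);	/* fire it up, as required above */
	return tsk;
}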
| 2505 | |
| 2506 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | * Ok, this is the main fork-routine. |
| 2508 | * |
| 2509 | * It copies the process, and if successful kick-starts |
| 2510 | * it and waits for it to finish using the VM if required. |
Eugene Syromiatnikov | a0eb9ab | 2019-09-11 18:45:40 +0100 | [diff] [blame] | 2511 | * |
| 2512 | * args->exit_signal is expected to be checked for sanity by the caller. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2513 | */ |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2514 | pid_t kernel_clone(struct kernel_clone_args *args) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2515 | { |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2516 | u64 clone_flags = args->flags; |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2517 | struct completion vfork; |
| 2518 | struct pid *pid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2519 | struct task_struct *p; |
| 2520 | int trace = 0; |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2521 | pid_t nr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2522 | |
Andrew Morton | bdff746 | 2008-02-04 22:27:22 -0800 | [diff] [blame] | 2523 | /* |
Christian Brauner | 3af8588 | 2020-06-08 17:28:50 +0200 | [diff] [blame] | 2524 | * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument |
| 2525 | * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are |
| 2526 | * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate |
| 2527 | * field in struct clone_args and it still doesn't make sense to have |
| 2528 | * them both point at the same memory location. Performing this check |
| 2529 | * here has the advantage that we don't need to have a separate helper |
| 2530 | * to check for legacy clone(). |
| 2531 | */ |
| 2532 | if ((args->flags & CLONE_PIDFD) && |
| 2533 | (args->flags & CLONE_PARENT_SETTID) && |
| 2534 | (args->pidfd == args->parent_tid)) |
| 2535 | return -EINVAL; |
| 2536 | |
| 2537 | /* |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2538 | * Determine whether and which event to report to ptracer. When |
| 2539 | * called from kernel_thread or when CLONE_UNTRACED is explicitly
| 2540 | * requested, no event is reported; otherwise, report if the event |
| 2541 | * for the type of forking is enabled. |
Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 2542 | */ |
Al Viro | e80d666 | 2012-10-22 23:10:08 -0400 | [diff] [blame] | 2543 | if (!(clone_flags & CLONE_UNTRACED)) { |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2544 | if (clone_flags & CLONE_VFORK) |
| 2545 | trace = PTRACE_EVENT_VFORK; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2546 | else if (args->exit_signal != SIGCHLD) |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2547 | trace = PTRACE_EVENT_CLONE; |
| 2548 | else |
| 2549 | trace = PTRACE_EVENT_FORK; |
| 2550 | |
| 2551 | if (likely(!ptrace_event_enabled(current, trace))) |
| 2552 | trace = 0; |
| 2553 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2554 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2555 | p = copy_process(NULL, trace, NUMA_NO_NODE, args); |
Emese Revfy | 38addce | 2016-06-20 20:41:19 +0200 | [diff] [blame] | 2556 | add_latent_entropy(); |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2557 | |
| 2558 | if (IS_ERR(p)) |
| 2559 | return PTR_ERR(p); |
| 2560 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2561 | /* |
| 2562 | * Do this prior to waking up the new thread - the thread pointer
| 2563 | * might get invalid after that point, if the thread exits quickly. |
| 2564 | */ |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2565 | trace_sched_process_fork(current, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2566 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2567 | pid = get_task_pid(p, PIDTYPE_PID); |
| 2568 | nr = pid_vnr(pid); |
Mathieu Desnoyers | 0a16b60 | 2008-07-18 12:16:17 -0400 | [diff] [blame] | 2569 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2570 | if (clone_flags & CLONE_PARENT_SETTID) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2571 | put_user(nr, args->parent_tid); |
Pavel Emelyanov | 30e49c2 | 2007-10-18 23:40:10 -0700 | [diff] [blame] | 2572 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2573 | if (clone_flags & CLONE_VFORK) { |
| 2574 | p->vfork_done = &vfork; |
| 2575 | init_completion(&vfork); |
| 2576 | get_task_struct(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2577 | } |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2578 | |
| 2579 | wake_up_new_task(p); |
| 2580 | |
| 2581 | /* forking complete and child started to run, tell ptracer */ |
| 2582 | if (unlikely(trace)) |
| 2583 | ptrace_event_pid(trace, pid); |
| 2584 | |
| 2585 | if (clone_flags & CLONE_VFORK) { |
| 2586 | if (!wait_for_vfork_done(p, &vfork)) |
| 2587 | ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); |
| 2588 | } |
| 2589 | |
| 2590 | put_pid(pid); |
Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 2591 | return nr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2592 | } |
| 2593 | |
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2594 | /* |
| 2595 | * Create a kernel thread. |
| 2596 | */ |
| 2597 | pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) |
| 2598 | { |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2599 | struct kernel_clone_args args = { |
Christian Brauner | 3f2c788 | 2020-05-07 12:32:14 +0200 | [diff] [blame] | 2600 | .flags = ((lower_32_bits(flags) | CLONE_VM | |
| 2601 | CLONE_UNTRACED) & ~CSIGNAL), |
| 2602 | .exit_signal = (lower_32_bits(flags) & CSIGNAL), |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2603 | .stack = (unsigned long)fn, |
| 2604 | .stack_size = (unsigned long)arg, |
| 2605 | }; |
| 2606 | |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2607 | return kernel_clone(&args); |
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2608 | } |
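/*
 * In-tree example: rest_init() in init/main.c starts the first kernel
 * threads this way, e.g.
 *
 *	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
 *	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 *
 * Most other code should go through kthread_create()/kthread_run(),
 * which build on this.
 */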
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2609 | |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2610 | #ifdef __ARCH_WANT_SYS_FORK |
| 2611 | SYSCALL_DEFINE0(fork) |
| 2612 | { |
| 2613 | #ifdef CONFIG_MMU |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2614 | struct kernel_clone_args args = { |
| 2615 | .exit_signal = SIGCHLD, |
| 2616 | }; |
| 2617 | |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2618 | return kernel_clone(&args); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2619 | #else |
| 2620 | /* cannot be supported in NOMMU mode */
Daeseok Youn | 5d59e18 | 2014-01-23 15:55:47 -0800 | [diff] [blame] | 2621 | return -EINVAL; |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2622 | #endif |
| 2623 | } |
| 2624 | #endif |
| 2625 | |
| 2626 | #ifdef __ARCH_WANT_SYS_VFORK |
| 2627 | SYSCALL_DEFINE0(vfork) |
| 2628 | { |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2629 | struct kernel_clone_args args = { |
| 2630 | .flags = CLONE_VFORK | CLONE_VM, |
| 2631 | .exit_signal = SIGCHLD, |
| 2632 | }; |
| 2633 | |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2634 | return kernel_clone(&args); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2635 | } |
| 2636 | #endif |
| 2637 | |
| 2638 | #ifdef __ARCH_WANT_SYS_CLONE |
| 2639 | #ifdef CONFIG_CLONE_BACKWARDS |
| 2640 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2641 | int __user *, parent_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2642 | unsigned long, tls, |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2643 | int __user *, child_tidptr) |
| 2644 | #elif defined(CONFIG_CLONE_BACKWARDS2) |
| 2645 | SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, |
| 2646 | int __user *, parent_tidptr, |
| 2647 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2648 | unsigned long, tls) |
Michal Simek | dfa9771 | 2013-08-13 16:00:53 -0700 | [diff] [blame] | 2649 | #elif defined(CONFIG_CLONE_BACKWARDS3) |
| 2650 | SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2651 | int, stack_size, |
| 2652 | int __user *, parent_tidptr, |
| 2653 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2654 | unsigned long, tls) |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2655 | #else |
| 2656 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2657 | int __user *, parent_tidptr, |
| 2658 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2659 | unsigned long, tls) |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2660 | #endif |
| 2661 | { |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2662 | struct kernel_clone_args args = { |
Christian Brauner | 3f2c788 | 2020-05-07 12:32:14 +0200 | [diff] [blame] | 2663 | .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2664 | 		.pidfd		= parent_tidptr, /* CLONE_PIDFD returns the pidfd here */ |
| 2665 | .child_tid = child_tidptr, |
| 2666 | .parent_tid = parent_tidptr, |
Christian Brauner | 3f2c788 | 2020-05-07 12:32:14 +0200 | [diff] [blame] | 2667 | .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2668 | .stack = newsp, |
| 2669 | .tls = tls, |
| 2670 | }; |
| 2671 | |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2672 | return kernel_clone(&args); |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2673 | } |
Christian Brauner | d68dbb0 | 2019-06-21 01:26:35 +0200 | [diff] [blame] | 2674 | #endif |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2675 | |
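/*
 * Userspace sketch (child_stack_top is a hypothetical pointer): the raw
 * syscall uses whichever argument order the architecture selected above.
 * With the default ordering (e.g. x86-64):
 *
 *	pid_t pid = syscall(SYS_clone,
 *			    CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD,
 *			    child_stack_top,	(newsp)
 *			    NULL,		(parent_tidptr)
 *			    NULL,		(child_tidptr)
 *			    0);			(tls)
 *
 * The glibc clone(3) wrapper hides these per-architecture differences.
 */
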
Christian Brauner | d68dbb0 | 2019-06-21 01:26:35 +0200 | [diff] [blame] | 2676 | #ifdef __ARCH_WANT_SYS_CLONE3 |
Amanieu d'Antras | dd499f7 | 2020-01-02 18:24:13 +0100 | [diff] [blame] | 2677 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2678 | noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, |
| 2679 | struct clone_args __user *uargs, |
Aleksa Sarai | f14c234 | 2019-10-01 11:10:53 +1000 | [diff] [blame] | 2680 | size_t usize) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2681 | { |
Aleksa Sarai | f14c234 | 2019-10-01 11:10:53 +1000 | [diff] [blame] | 2682 | int err; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2683 | struct clone_args args; |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2684 | pid_t *kset_tid = kargs->set_tid; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2685 | |
Eugene Syromiatnikov | a966dcf | 2020-04-12 22:26:58 +0200 | [diff] [blame] | 2686 | BUILD_BUG_ON(offsetofend(struct clone_args, tls) != |
| 2687 | CLONE_ARGS_SIZE_VER0); |
| 2688 | BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != |
| 2689 | CLONE_ARGS_SIZE_VER1); |
| 2690 | BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != |
| 2691 | CLONE_ARGS_SIZE_VER2); |
| 2692 | BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); |
| 2693 | |
Aleksa Sarai | f14c234 | 2019-10-01 11:10:53 +1000 | [diff] [blame] | 2694 | if (unlikely(usize > PAGE_SIZE)) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2695 | return -E2BIG; |
Aleksa Sarai | f14c234 | 2019-10-01 11:10:53 +1000 | [diff] [blame] | 2696 | if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2697 | return -EINVAL; |
| 2698 | |
Aleksa Sarai | f14c234 | 2019-10-01 11:10:53 +1000 | [diff] [blame] | 2699 | err = copy_struct_from_user(&args, sizeof(args), uargs, usize); |
| 2700 | if (err) |
| 2701 | return err; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2702 | |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2703 | if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) |
| 2704 | return -EINVAL; |
| 2705 | |
| 2706 | if (unlikely(!args.set_tid && args.set_tid_size > 0)) |
| 2707 | return -EINVAL; |
| 2708 | |
| 2709 | if (unlikely(args.set_tid && args.set_tid_size == 0)) |
| 2710 | return -EINVAL; |
| 2711 | |
Eugene Syromiatnikov | a0eb9ab | 2019-09-11 18:45:40 +0100 | [diff] [blame] | 2712 | /* |
| 2713 | 	 * Verify that the upper 32 bits of exit_signal are unset and |
| 2714 | 	 * that it is a valid signal |
| 2715 | */ |
| 2716 | if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || |
| 2717 | !valid_signal(args.exit_signal))) |
| 2718 | return -EINVAL; |
| 2719 | |
Eugene Syromiatnikov | 6217387 | 2020-04-12 22:31:23 +0200 | [diff] [blame] | 2720 | if ((args.flags & CLONE_INTO_CGROUP) && |
| 2721 | (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2722 | return -EINVAL; |
| 2723 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2724 | *kargs = (struct kernel_clone_args){ |
| 2725 | .flags = args.flags, |
| 2726 | .pidfd = u64_to_user_ptr(args.pidfd), |
| 2727 | .child_tid = u64_to_user_ptr(args.child_tid), |
| 2728 | .parent_tid = u64_to_user_ptr(args.parent_tid), |
| 2729 | .exit_signal = args.exit_signal, |
| 2730 | .stack = args.stack, |
| 2731 | .stack_size = args.stack_size, |
| 2732 | .tls = args.tls, |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2733 | .set_tid_size = args.set_tid_size, |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2734 | .cgroup = args.cgroup, |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2735 | }; |
| 2736 | |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2737 | if (args.set_tid && |
| 2738 | copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), |
| 2739 | (kargs->set_tid_size * sizeof(pid_t)))) |
| 2740 | return -EFAULT; |
| 2741 | |
| 2742 | kargs->set_tid = kset_tid; |
| 2743 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2744 | return 0; |
| 2745 | } |
| 2746 | |
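/*
 * Illustrative sketch of the size-based versioning enforced above:
 * userspace passes the size of *its* view of struct clone_args, anywhere
 * from CLONE_ARGS_SIZE_VER0 up to PAGE_SIZE. copy_struct_from_user()
 * zero-fills the kernel copy when the caller's struct is older (smaller)
 * and fails with -E2BIG when a newer (larger) struct carries non-zero
 * trailing bytes:
 *
 *	struct clone_args args = {
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * Passing sizeof(args) from the caller's own headers is what lets old
 * binaries keep working on newer kernels, and newer binaries fail
 * cleanly on older kernels.
 */
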
Christian Brauner | fa729c4 | 2019-10-31 12:36:08 +0100 | [diff] [blame] | 2747 | /** |
| 2748 | * clone3_stack_valid - check and prepare stack |
| 2749 | * @kargs: kernel clone args |
| 2750 | * |
| 2751 | * Verify that the stack arguments userspace gave us are sane. |
| 2752 |  * In addition, adjust the stack address for the stack-growth direction on |
| 2753 |  * behalf of userspace, since that is easy for the kernel to determine. |
| 2754 | */ |
| 2755 | static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) |
| 2756 | { |
| 2757 | if (kargs->stack == 0) { |
| 2758 | if (kargs->stack_size > 0) |
| 2759 | return false; |
| 2760 | } else { |
| 2761 | if (kargs->stack_size == 0) |
| 2762 | return false; |
| 2763 | |
| 2764 | if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) |
| 2765 | return false; |
| 2766 | |
| 2767 | #if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64) |
| 2768 | kargs->stack += kargs->stack_size; |
| 2769 | #endif |
| 2770 | } |
| 2771 | |
| 2772 | return true; |
| 2773 | } |
| 2774 | |
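/*
 * Userspace sketch: on stack-grows-down architectures the caller passes
 * the lowest address of the stack region plus its size, and the
 * adjustment above produces the initial stack pointer:
 *
 *	size_t stack_size = 1024 * 1024;
 *	void *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *	struct clone_args args = {
 *		.flags	     = CLONE_VM,
 *		.exit_signal = SIGCHLD,
 *		.stack	     = (__u64)(uintptr_t)stack,
 *		.stack_size  = stack_size,
 *	};
 *
 * Legacy clone() instead required userspace to compute the stack top
 * itself.
 */
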
| 2775 | static bool clone3_args_valid(struct kernel_clone_args *kargs) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2776 | { |
Christian Brauner | b612e5d | 2019-10-14 12:45:37 +0200 | [diff] [blame] | 2777 | /* Verify that no unknown flags are passed along. */ |
Christian Brauner | ef2c41c | 2020-02-05 14:26:22 +0100 | [diff] [blame] | 2778 | if (kargs->flags & |
| 2779 | ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2780 | return false; |
| 2781 | |
| 2782 | /* |
Xiaofeng Cao | a8ca6b1 | 2021-05-06 18:04:28 -0700 | [diff] [blame] | 2783 | * - make the CLONE_DETACHED bit reusable for clone3 |
| 2784 | * - make the CSIGNAL bits reusable for clone3 |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2785 | */ |
| 2786 | if (kargs->flags & (CLONE_DETACHED | CSIGNAL)) |
| 2787 | return false; |
| 2788 | |
Christian Brauner | b612e5d | 2019-10-14 12:45:37 +0200 | [diff] [blame] | 2789 | if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == |
| 2790 | (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) |
| 2791 | return false; |
| 2792 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2793 | if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && |
| 2794 | kargs->exit_signal) |
| 2795 | return false; |
| 2796 | |
Christian Brauner | fa729c4 | 2019-10-31 12:36:08 +0100 | [diff] [blame] | 2797 | if (!clone3_stack_valid(kargs)) |
| 2798 | return false; |
| 2799 | |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2800 | return true; |
| 2801 | } |
| 2802 | |
Christian Brauner | 501bd01 | 2019-09-27 17:28:42 +0200 | [diff] [blame] | 2803 | /** |
| 2804 | * clone3 - create a new process with specific properties |
| 2805 | * @uargs: argument structure |
| 2806 | * @size: size of @uargs |
| 2807 | * |
| 2808 | * clone3() is the extensible successor to clone()/clone2(). |
| 2809 |  * It takes a struct as an argument that is versioned by its size. |
| 2810 | * |
| 2811 | * Return: On success, a positive PID for the child process. |
| 2812 | * On error, a negative errno number. |
| 2813 | */ |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2814 | SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) |
| 2815 | { |
| 2816 | int err; |
| 2817 | |
| 2818 | struct kernel_clone_args kargs; |
Adrian Reber | 49cb2fc | 2019-11-15 13:36:20 +0100 | [diff] [blame] | 2819 | pid_t set_tid[MAX_PID_NS_LEVEL]; |
| 2820 | |
| 2821 | kargs.set_tid = set_tid; |
Christian Brauner | 7f192e3 | 2019-05-25 11:36:41 +0200 | [diff] [blame] | 2822 | |
| 2823 | err = copy_clone_args_from_user(&kargs, uargs, size); |
| 2824 | if (err) |
| 2825 | return err; |
| 2826 | |
| 2827 | if (!clone3_args_valid(&kargs)) |
| 2828 | return -EINVAL; |
| 2829 | |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2830 | return kernel_clone(&kargs); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2831 | } |
| 2832 | #endif |
| 2833 | |
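/*
 * Userspace sketch (target_cgroup_fd is a hypothetical descriptor for a
 * cgroup2 directory): newer clone3 fields are adopted simply by
 * compiling against headers with a larger struct, e.g. spawning a child
 * directly into a target cgroup:
 *
 *	struct clone_args args = {
 *		.flags	     = CLONE_INTO_CGROUP,
 *		.exit_signal = SIGCHLD,
 *		.cgroup      = target_cgroup_fd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 */
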
Oleg Nesterov | 0f1b92c | 2017-01-30 18:06:11 +0300 | [diff] [blame] | 2834 | void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) |
| 2835 | { |
| 2836 | struct task_struct *leader, *parent, *child; |
| 2837 | int res; |
| 2838 | |
| 2839 | read_lock(&tasklist_lock); |
| 2840 | leader = top = top->group_leader; |
| 2841 | down: |
| 2842 | for_each_thread(leader, parent) { |
| 2843 | list_for_each_entry(child, &parent->children, sibling) { |
| 2844 | res = visitor(child, data); |
| 2845 | if (res) { |
| 2846 | if (res < 0) |
| 2847 | goto out; |
| 2848 | leader = child; |
| 2849 | goto down; |
| 2850 | } |
| 2851 | up: |
| 2852 | ; |
| 2853 | } |
| 2854 | } |
| 2855 | |
| 2856 | if (leader != top) { |
| 2857 | child = leader; |
| 2858 | parent = child->real_parent; |
| 2859 | leader = parent->group_leader; |
| 2860 | goto up; |
| 2861 | } |
| 2862 | out: |
| 2863 | read_unlock(&tasklist_lock); |
| 2864 | } |
| 2865 | |
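/*
 * Illustrative sketch (count_descendants is a hypothetical visitor):
 * the down:/up: labels above implement an iterative depth-first walk.
 * A visitor returns 0 to skip a child's subtree, a positive value to
 * descend into it, or a negative value to abort the whole walk:
 *
 *	static int count_descendants(struct task_struct *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;	(keep descending)
 *	}
 *
 *	int n = 0;
 *	walk_process_tree(current, count_descendants, &n);
 */
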
Ravikiran G Thirumalai | 5fd63b3 | 2006-01-11 22:46:15 +0100 | [diff] [blame] | 2866 | #ifndef ARCH_MIN_MMSTRUCT_ALIGN |
| 2867 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 |
| 2868 | #endif |
| 2869 | |
Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 2870 | static void sighand_ctor(void *data) |
Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 2871 | { |
| 2872 | struct sighand_struct *sighand = data; |
| 2873 | |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 2874 | spin_lock_init(&sighand->siglock); |
Davide Libenzi | b8fceee | 2007-09-20 12:40:16 -0700 | [diff] [blame] | 2875 | init_waitqueue_head(&sighand->signalfd_wqh); |
Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 2876 | } |
| 2877 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2878 | void __init proc_caches_init(void) |
| 2879 | { |
Rik van Riel | c1a2f7f | 2018-07-16 15:03:31 -0400 | [diff] [blame] | 2880 | unsigned int mm_size; |
| 2881 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2882 | sighand_cachep = kmem_cache_create("sighand_cache", |
| 2883 | sizeof(struct sighand_struct), 0, |
Paul E. McKenney | 5f0d5a3 | 2017-01-18 02:53:44 -0800 | [diff] [blame] | 2884 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2885 | SLAB_ACCOUNT, sighand_ctor); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2886 | signal_cachep = kmem_cache_create("signal_cache", |
| 2887 | sizeof(struct signal_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2888 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2889 | NULL); |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2890 | files_cachep = kmem_cache_create("files_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2891 | sizeof(struct files_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2892 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2893 | NULL); |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2894 | fs_cachep = kmem_cache_create("fs_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2895 | sizeof(struct fs_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2896 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2897 | NULL); |
Rik van Riel | c1a2f7f | 2018-07-16 15:03:31 -0400 | [diff] [blame] | 2898 | |
Linus Torvalds | 6345d24 | 2011-05-29 11:32:28 -0700 | [diff] [blame] | 2899 | /* |
Rik van Riel | c1a2f7f | 2018-07-16 15:03:31 -0400 | [diff] [blame] | 2900 | * The mm_cpumask is located at the end of mm_struct, and is |
| 2901 | * dynamically sized based on the maximum CPU number this system |
| 2902 | * can have, taking hotplug into account (nr_cpu_ids). |
Linus Torvalds | 6345d24 | 2011-05-29 11:32:28 -0700 | [diff] [blame] | 2903 | */ |
Rik van Riel | c1a2f7f | 2018-07-16 15:03:31 -0400 | [diff] [blame] | 2904 | mm_size = sizeof(struct mm_struct) + cpumask_size(); |
| 2905 | |
David Windsor | 07dcd7f | 2017-08-15 16:45:00 -0700 | [diff] [blame] | 2906 | mm_cachep = kmem_cache_create_usercopy("mm_struct", |
Rik van Riel | c1a2f7f | 2018-07-16 15:03:31 -0400 | [diff] [blame] | 2907 | mm_size, ARCH_MIN_MMSTRUCT_ALIGN, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2908 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
David Windsor | 07dcd7f | 2017-08-15 16:45:00 -0700 | [diff] [blame] | 2909 | offsetof(struct mm_struct, saved_auxv), |
| 2910 | sizeof_field(struct mm_struct, saved_auxv), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2911 | NULL); |
| 2912 | vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 2913 | mmap_init(); |
Al Viro | 6657719 | 2011-06-28 15:41:10 -0400 | [diff] [blame] | 2914 | nsproxy_cache_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2916 | |
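/*
 * Illustrative sketch of the mm_struct allocation sized above: the
 * structure ends in a flexible array, so a single allocation covers
 * both the struct and its cpumask:
 *
 *	struct mm_struct {
 *		...
 *		unsigned long cpu_bitmap[];	(mm_cpumask() storage)
 *	};
 *
 *	size = sizeof(struct mm_struct) + cpumask_size();
 *
 * cpumask_size() scales with nr_cpu_ids, so a kernel built for many
 * possible CPUs does not bloat every mm on a small machine.
 */
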
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2917 | /* |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2918 | * Check constraints on flags passed to the unshare system call. |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2919 | */ |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2920 | static int check_unshare_flags(unsigned long unshare_flags) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2921 | { |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2922 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| |
| 2923 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 2924 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| |
Andrei Vagin | 769071a | 2019-11-12 01:26:52 +0000 | [diff] [blame] | 2925 | CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP| |
| 2926 | CLONE_NEWTIME)) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2927 | return -EINVAL; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2928 | /* |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2929 | * Not implemented, but pretend it works if there is nothing |
| 2930 | * to unshare. Note that unsharing the address space or the |
| 2931 | 	 * signal handlers also requires unsharing the signal queues (aka |
| 2932 | * CLONE_THREAD). |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2933 | */ |
| 2934 | if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2935 | if (!thread_group_empty(current)) |
| 2936 | return -EINVAL; |
| 2937 | } |
| 2938 | if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { |
Elena Reshetova | d036bda | 2019-01-18 14:27:26 +0200 | [diff] [blame] | 2939 | if (refcount_read(¤t->sighand->count) > 1) |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2940 | return -EINVAL; |
| 2941 | } |
| 2942 | if (unshare_flags & CLONE_VM) { |
| 2943 | if (!current_is_single_threaded()) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2944 | return -EINVAL; |
| 2945 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2946 | |
| 2947 | return 0; |
| 2948 | } |
| 2949 | |
| 2950 | /* |
JANAK DESAI | 99d1419 | 2006-02-07 12:58:59 -0800 | [diff] [blame] | 2951 | * Unshare the filesystem structure if it is being shared |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2952 | */ |
| 2953 | static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) |
| 2954 | { |
| 2955 | struct fs_struct *fs = current->fs; |
| 2956 | |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 2957 | if (!(unshare_flags & CLONE_FS) || !fs) |
| 2958 | return 0; |
| 2959 | |
| 2960 | 	/* don't need the lock here; in the worst case we'll do a useless copy */ |
| 2961 | if (fs->users == 1) |
| 2962 | return 0; |
| 2963 | |
| 2964 | *new_fsp = copy_fs_struct(fs); |
| 2965 | if (!*new_fsp) |
| 2966 | return -ENOMEM; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2967 | |
| 2968 | return 0; |
| 2969 | } |
| 2970 | |
| 2971 | /* |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2972 | * Unshare file descriptor table if it is being shared |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2973 | */ |
Christian Brauner | 60997c3 | 2020-06-03 21:48:55 +0200 | [diff] [blame] | 2974 | int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, |
| 2975 | struct files_struct **new_fdp) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2976 | { |
| 2977 | struct files_struct *fd = current->files; |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2978 | int error = 0; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2979 | |
| 2980 | if ((unshare_flags & CLONE_FILES) && |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2981 | (fd && atomic_read(&fd->count) > 1)) { |
Christian Brauner | 60997c3 | 2020-06-03 21:48:55 +0200 | [diff] [blame] | 2982 | *new_fdp = dup_fd(fd, max_fds, &error); |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2983 | if (!*new_fdp) |
| 2984 | return error; |
| 2985 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2986 | |
| 2987 | return 0; |
| 2988 | } |
| 2989 | |
| 2990 | /* |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2991 | * unshare allows a process to 'unshare' part of the process |
| 2992 | * context which was originally shared using clone. copy_* |
Christian Brauner | cad6967 | 2020-08-19 12:46:45 +0200 | [diff] [blame] | 2993 | * functions used by kernel_clone() cannot be used here directly |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2994 | * because they modify an inactive task_struct that is being |
| 2995 |  * constructed. Here we are modifying the current, active |
| 2996 | * task_struct. |
| 2997 | */ |
Dominik Brodowski | 9b32105 | 2018-03-11 11:34:42 +0100 | [diff] [blame] | 2998 | int ksys_unshare(unsigned long unshare_flags) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2999 | { |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3000 | struct fs_struct *fs, *new_fs = NULL; |
Ran Xiaokai | ba1f70d | 2021-11-08 18:35:22 -0800 | [diff] [blame] | 3001 | struct files_struct *new_fd = NULL; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3002 | struct cred *new_cred = NULL; |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 3003 | struct nsproxy *new_nsproxy = NULL; |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 3004 | int do_sysvsem = 0; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 3005 | int err; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3006 | |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 3007 | /* |
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 3008 | 	 * If unsharing a user namespace, we must also unshare the thread |
| 3009 | 	 * group and the filesystem root and working directories. |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3010 | */ |
| 3011 | if (unshare_flags & CLONE_NEWUSER) |
Eric W. Biederman | e66eded | 2013-03-13 11:51:49 -0700 | [diff] [blame] | 3012 | unshare_flags |= CLONE_THREAD | CLONE_FS; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3013 | /* |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 3014 | 	 * If unsharing the address space, we must also unshare signal handlers. |
| 3015 | */ |
| 3016 | if (unshare_flags & CLONE_VM) |
| 3017 | unshare_flags |= CLONE_SIGHAND; |
Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 3018 | /* |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 3019 | 	 * If unsharing signal handlers, we must also unshare the signal queues. |
| 3020 | */ |
| 3021 | if (unshare_flags & CLONE_SIGHAND) |
| 3022 | unshare_flags |= CLONE_THREAD; |
| 3023 | /* |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 3024 | 	 * If unsharing the mount namespace, we must also unshare filesystem information. |
| 3025 | */ |
| 3026 | if (unshare_flags & CLONE_NEWNS) |
| 3027 | unshare_flags |= CLONE_FS; |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 3028 | |
| 3029 | err = check_unshare_flags(unshare_flags); |
| 3030 | if (err) |
| 3031 | goto bad_unshare_out; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 3032 | /* |
Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 3033 | * CLONE_NEWIPC must also detach from the undolist: after switching |
| 3034 | * to a new ipc namespace, the semaphore arrays from the old |
| 3035 | * namespace are unreachable. |
| 3036 | */ |
| 3037 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 3038 | do_sysvsem = 1; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 3039 | err = unshare_fs(unshare_flags, &new_fs); |
| 3040 | if (err) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 3041 | goto bad_unshare_out; |
Christian Brauner | 60997c3 | 2020-06-03 21:48:55 +0200 | [diff] [blame] | 3042 | err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 3043 | if (err) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 3044 | goto bad_unshare_cleanup_fs; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3045 | err = unshare_userns(unshare_flags, &new_cred); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 3046 | if (err) |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 3047 | goto bad_unshare_cleanup_fd; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3048 | err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, |
| 3049 | new_cred, new_fs); |
| 3050 | if (err) |
| 3051 | goto bad_unshare_cleanup_cred; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3052 | |
Alexey Gladkov | 905ae01 | 2021-04-22 14:27:09 +0200 | [diff] [blame] | 3053 | if (new_cred) { |
| 3054 | err = set_cred_ucounts(new_cred); |
| 3055 | if (err) |
| 3056 | goto bad_unshare_cleanup_cred; |
| 3057 | } |
| 3058 | |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3059 | if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 3060 | if (do_sysvsem) { |
| 3061 | /* |
| 3062 | * CLONE_SYSVSEM is equivalent to sys_exit(). |
| 3063 | */ |
| 3064 | exit_sem(current); |
| 3065 | } |
Jack Miller | ab602f7 | 2014-08-08 14:23:19 -0700 | [diff] [blame] | 3066 | if (unshare_flags & CLONE_NEWIPC) { |
| 3067 | /* Orphan segments in old ns (see sem above). */ |
| 3068 | exit_shm(current); |
| 3069 | shm_init_task(current); |
| 3070 | } |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 3071 | |
Alan Cox | 6f977e6 | 2013-02-27 17:03:23 -0800 | [diff] [blame] | 3072 | if (new_nsproxy) |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 3073 | switch_task_namespaces(current, new_nsproxy); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3074 | |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 3075 | task_lock(current); |
| 3076 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3077 | if (new_fs) { |
| 3078 | fs = current->fs; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 3079 | spin_lock(&fs->lock); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3080 | current->fs = new_fs; |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 3081 | if (--fs->users) |
| 3082 | new_fs = NULL; |
| 3083 | else |
| 3084 | new_fs = fs; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 3085 | spin_unlock(&fs->lock); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3086 | } |
| 3087 | |
Ran Xiaokai | ba1f70d | 2021-11-08 18:35:22 -0800 | [diff] [blame] | 3088 | if (new_fd) |
| 3089 | swap(current->files, new_fd); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3090 | |
| 3091 | task_unlock(current); |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3092 | |
| 3093 | if (new_cred) { |
| 3094 | /* Install the new user namespace */ |
| 3095 | commit_creds(new_cred); |
| 3096 | new_cred = NULL; |
| 3097 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3098 | } |
| 3099 | |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 3100 | perf_event_namespaces(current); |
| 3101 | |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 3102 | bad_unshare_cleanup_cred: |
| 3103 | if (new_cred) |
| 3104 | put_cred(new_cred); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3105 | bad_unshare_cleanup_fd: |
| 3106 | if (new_fd) |
| 3107 | put_files_struct(new_fd); |
| 3108 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3109 | bad_unshare_cleanup_fs: |
| 3110 | if (new_fs) |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 3111 | free_fs_struct(new_fs); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3112 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 3113 | bad_unshare_out: |
| 3114 | return err; |
| 3115 | } |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3116 | |
Dominik Brodowski | 9b32105 | 2018-03-11 11:34:42 +0100 | [diff] [blame] | 3117 | SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
| 3118 | { |
| 3119 | return ksys_unshare(unshare_flags); |
| 3120 | } |
| 3121 | |
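/*
 * Userspace sketch: unlike clone(), unshare(2) acts on the calling
 * task, with the implied-flag fixups applied above. Given the needed
 * privileges (e.g. CAP_SYS_ADMIN):
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		(the caller now has a private mount namespace;
 *		 CLONE_FS was implied, so root and cwd state
 *		 was privatized as well)
 *	}
 */
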
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3122 | /* |
| 3123 | * Helper to unshare the files of the current task. |
| 3124 | * We don't want to expose copy_files internals to |
| 3125 | * the exec layer of the kernel. |
| 3126 | */ |
| 3127 | |
Eric W. Biederman | 1f70260 | 2020-11-20 17:14:19 -0600 | [diff] [blame] | 3128 | int unshare_files(void) |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3129 | { |
| 3130 | struct task_struct *task = current; |
Eric W. Biederman | 1f70260 | 2020-11-20 17:14:19 -0600 | [diff] [blame] | 3131 | struct files_struct *old, *copy = NULL; |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3132 | int error; |
| 3133 | |
Christian Brauner | 60997c3 | 2020-06-03 21:48:55 +0200 | [diff] [blame] | 3134 | error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, ©); |
Eric W. Biederman | 1f70260 | 2020-11-20 17:14:19 -0600 | [diff] [blame] | 3135 | if (error || !copy) |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3136 | return error; |
Eric W. Biederman | 1f70260 | 2020-11-20 17:14:19 -0600 | [diff] [blame] | 3137 | |
| 3138 | old = task->files; |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3139 | task_lock(task); |
| 3140 | task->files = copy; |
| 3141 | task_unlock(task); |
Eric W. Biederman | 1f70260 | 2020-11-20 17:14:19 -0600 | [diff] [blame] | 3142 | put_files_struct(old); |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 3143 | return 0; |
| 3144 | } |
Heinrich Schuchardt | 16db3d3 | 2015-04-16 12:47:50 -0700 | [diff] [blame] | 3145 | |
| 3146 | int sysctl_max_threads(struct ctl_table *table, int write, |
Tobias Klauser | b0daa2c | 2020-09-04 16:35:49 -0700 | [diff] [blame] | 3147 | void *buffer, size_t *lenp, loff_t *ppos) |
Heinrich Schuchardt | 16db3d3 | 2015-04-16 12:47:50 -0700 | [diff] [blame] | 3148 | { |
| 3149 | struct ctl_table t; |
| 3150 | int ret; |
| 3151 | int threads = max_threads; |
Michal Hocko | b0f53db | 2019-10-06 17:58:19 -0700 | [diff] [blame] | 3152 | int min = 1; |
Heinrich Schuchardt | 16db3d3 | 2015-04-16 12:47:50 -0700 | [diff] [blame] | 3153 | int max = MAX_THREADS; |
| 3154 | |
| 3155 | t = *table; |
| 3156 | t.data = &threads; |
| 3157 | t.extra1 = &min; |
| 3158 | t.extra2 = &max; |
| 3159 | |
| 3160 | ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 3161 | if (ret || !write) |
| 3162 | return ret; |
| 3163 | |
Michal Hocko | b0f53db | 2019-10-06 17:58:19 -0700 | [diff] [blame] | 3164 | max_threads = threads; |
Heinrich Schuchardt | 16db3d3 | 2015-04-16 12:47:50 -0700 | [diff] [blame] | 3165 | |
| 3166 | return 0; |
| 3167 | } |
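
/*
 * Illustrative sketch: this handler backs /proc/sys/kernel/threads-max;
 * proc_dointvec_minmax() rejects writes outside [1, MAX_THREADS]:
 *
 *	int fd = open("/proc/sys/kernel/threads-max", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "200000", 6);	(fails with EINVAL if out of range)
 *		close(fd);
 *	}
 */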