/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>

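/*
 * Each pid-namespace nesting depth gets its own kmem_cache, because a
 * struct pid carries one struct upid for every level it is visible in.
 * The caches are kept on pid_caches_lh and shared by namespaces of the
 * same depth.
 */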
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * Creates (or finds an already existing) kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

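/*
 * Workqueue callback, scheduled by free_pid() once the last pid in the
 * namespace is gone (nr_hashed reaches zero); releases the namespace's
 * proc mount.
 */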
static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

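/*
 * Creating a pid namespace is charged against the owning user's
 * UCOUNT_PID_NAMESPACES limit in the given user namespace.
 */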
static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

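/*
 * Allocate a namespace one level below @parent_pid_ns: charge the ucount,
 * allocate the first pidmap page and the struct pid cache for the new
 * depth, assign an nsfs inode number and take references on the parent
 * pid namespace and the owning user namespace.
 */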
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int i;
	int err;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_map;
	ns->ns.ops = &pidns_operations;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->nr_hashed = PIDNS_HASH_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

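/*
 * RCU callback from destroy_pid_namespace(): after a grace period, drop
 * the ucount charge and the user-namespace reference, then free the
 * structure itself.
 */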
static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

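/*
 * Final teardown once the last reference is gone: return the nsfs inode
 * number, free the pidmap pages and defer the rest to RCU.
 */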
static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	ns_free_inum(&ns->ns);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

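/*
 * clone()/unshare() hook: without CLONE_NEWPID just take a reference on
 * @old_ns; otherwise create a child namespace, which is only allowed
 * from the task's currently active pid namespace.
 */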
struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

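/*
 * Reference counting: free_pid_ns() is the kref release callback, and
 * put_pid_ns() walks up the ancestor chain dropping the reference each
 * child holds on its parent, stopping at the first namespace that is
 * still in use (init_pid_ns is never freed).
 */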
static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

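/*
 * Called when the namespace's init task exits: stop new pid allocation,
 * kill every remaining task in the namespace, reap our children and then
 * wait until only init's own pids are left hashed.
 */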
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal and wait for them
	 * to exit.
	 *
	 * Note: This signals each thread in the namespace - even those that
	 *       belong to the same thread group. To avoid this, we would have
	 *       to walk the entire tasklist looking for processes in this
	 *       namespace, but that could be unnecessarily expensive if the
	 *       pid namespace has just a few processes. Or we need to
	 *       maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * sys_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the EXIT_DEAD children but we do not
	 * really care, we could reparent them to the global init. We could
	 * exit and reap ->child_reaper even if it is not the last thread in
	 * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
	 * pid_ns can not go away until proc_kill_sb() drops the reference.
	 *
	 * But this ns can also have other tasks injected by setns()+fork().
	 * Again, ignoring the user visible semantics we do not really need
	 * to wait until they are all reaped, but they can be reparented to
	 * us and thus we need to ensure that pid->child_reaper stays valid
	 * until they all go away. See free_pid()->wake_up_process().
	 *
	 * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
	 * if reparented.
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (pid_ns->nr_hashed == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

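/*
 * kernel.ns_last_pid: lets a task with CAP_SYS_ADMIN over the pid
 * namespace read or set the namespace's last allocated pid, so that
 * checkpoint/restore can control which pid is handed out next.
 */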
#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to ns' last_pid field is OK, since this field
	 * is volatile in a living namespace anyway, and any code writing
	 * to it must synchronize its use by external means.
	 */

	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = &zero,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

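/*
 * reboot() called from inside a pid namespace: instead of rebooting the
 * machine, record the requested action as the exit code of the
 * namespace's init (SIGHUP for restart, SIGINT for halt/power off),
 * kill that init and exit the calling task.
 */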
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

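/* nsfs hooks backing /proc/<pid>/ns/pid and setns() on a pidns fd. */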
static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

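/*
 * Return the parent of @ns, taking a reference on it.  Only permitted
 * when the caller's active pid namespace is the parent itself or one of
 * its ancestors; otherwise -EPERM.
 */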
static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

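/*
 * Boot-time setup: create the pid_namespace slab cache and, with
 * CONFIG_CHECKPOINT_RESTORE, register the kernel.ns_last_pid sysctl.
 */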
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);