/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * Create (or reuse) the kmem cache that pids with @nr_ids numerical ids
 * are allocated from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

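/*
 * Deferred cleanup of the namespace's proc mount, run from ->proc_work.
 * As noted in zap_pid_ns_processes() below, free_pid() schedules this once
 * nr_hashed reaches zero.
 */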
static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

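/*
 * Charge/uncharge a pid namespace against the creator's UCOUNT_PID_NAMESPACES
 * limit in the owning user namespace.
 */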
static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

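/*
 * Allocate and set up a child pid namespace one level below @parent_pid_ns,
 * owned by @user_ns: charge the ucount limit, allocate the first pidmap page,
 * pick a pid cache sized for the new nesting level, and reserve pid 0.
 */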
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int i;
	int err;

	err = -EINVAL;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_map;
	ns->ns.ops = &pidns_operations;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->nr_hashed = PIDNS_HASH_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

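/* RCU callback: actually free the pid_namespace after a grace period. */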
static void delayed_free_pidns(struct rcu_head *p)
{
	kmem_cache_free(pid_ns_cachep,
			container_of(p, struct pid_namespace, rcu));
}

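/*
 * Tear down a dead namespace: release its inum and pidmap pages, uncharge
 * the ucount, drop the user namespace reference and free the structure
 * after an RCU grace period.
 */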
static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	ns_free_inum(&ns->ns);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

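/*
 * Without CLONE_NEWPID just take a reference on @old_ns; otherwise create a
 * child of the caller's currently active pid namespace.
 */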
struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

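/*
 * Drop a reference; whenever a namespace's count hits zero, destroy it and
 * continue up the parent chain, so teardown of a deeply nested namespace
 * stays iterative rather than recursive.
 */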
void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

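/*
 * Called when the namespace's init is exiting: stop new pid allocation,
 * SIGKILL every remaining task in the namespace, then wait until only the
 * init task(s) remain hashed before letting the reaper finish exiting.
 */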
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pid_ts in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: This signals each thread in the namespace - even those that
	 *	 belong to the same thread group. To avoid this, we would have
	 *	 to walk the entire tasklist looking for processes in this
	 *	 namespace, but that could be unnecessarily expensive if the
	 *	 pid namespace has just a few processes. Or we would need to
	 *	 maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * sys_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the EXIT_DEAD children but we do not
	 * really care, we could reparent them to the global init. We could
	 * exit and reap ->child_reaper even if it is not the last thread in
	 * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
	 * pid_ns can not go away until proc_kill_sb() drops the reference.
	 *
	 * But this ns can also have other tasks injected by setns()+fork().
	 * Again, ignoring the user visible semantics we do not really need
	 * to wait until they are all reaped, but they can be reparented to
	 * us and thus we need to ensure that pid->child_reaper stays valid
	 * until they all go away. See free_pid()->wake_up_process().
	 *
	 * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
	 * if reparented.
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (pid_ns->nr_hashed == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
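/*
 * Handler for the kernel.ns_last_pid sysctl: a writer with CAP_SYS_ADMIN in
 * the pid namespace's owning user namespace can set last_pid so that
 * subsequent pid allocation in this namespace continues from a chosen value
 * (used by checkpoint/restore).
 */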
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this field
	 * is volatile in a living namespace anyway, and any code writing to
	 * it must synchronize its usage by external means.
	 */

	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = &zero,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

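/*
 * reboot(2) from inside a non-initial pid namespace does not reboot the
 * machine: record the requested action in pid_ns->reboot (reported later as
 * the group exit code in zap_pid_ns_processes()), SIGKILL the namespace's
 * init and exit the caller. For the initial pid namespace this is a no-op
 * (returns 0) and the normal reboot path proceeds.
 */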
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

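/*
 * proc_ns_operations callbacks backing /proc/<pid>/ns/pid: take and drop a
 * reference on a task's active pid namespace, and install one via setns().
 */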
static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

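/*
 * Note that install only switches nsproxy->pid_ns_for_children: the calling
 * task keeps its own pid, and only its future children are created in the
 * target namespace, which must be the caller's active pid namespace or a
 * descendant of it.
 */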
static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

const struct proc_ns_operations pidns_operations = {
	.name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_get,
	.put = pidns_put,
	.install = pidns_install,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);