blob: 7d87017a0040982ab4ec1141fb246f8199835164 [file] [log] [blame]
Cedric Le Goateracce2922007-07-15 23:40:59 -07001/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation, version 2 of the
5 * License.
6 */
7
Paul Gortmaker9984de12011-05-23 14:51:41 -04008#include <linux/export.h>
Cedric Le Goateracce2922007-07-15 23:40:59 -07009#include <linux/nsproxy.h>
Robert P. J. Day1aeb2722008-04-29 00:59:25 -070010#include <linux/slab.h>
Cedric Le Goateracce2922007-07-15 23:40:59 -070011#include <linux/user_namespace.h>
David Howells0bb80f22013-04-12 01:50:06 +010012#include <linux/proc_ns.h>
Eric W. Biederman5c1469d2010-06-13 03:28:03 +000013#include <linux/highuid.h>
Serge Hallyn18b6e042008-10-15 16:38:45 -050014#include <linux/cred.h>
Eric W. Biederman973c5912011-11-17 01:59:07 -080015#include <linux/securebits.h>
Eric W. Biederman22d917d2011-11-17 00:11:58 -080016#include <linux/keyctl.h>
17#include <linux/key-type.h>
18#include <keys/user-type.h>
19#include <linux/seq_file.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/ctype.h>
Eric W. Biedermanf76d2072012-08-30 01:24:05 -070023#include <linux/projid.h>
Eric W. Biedermane66eded2013-03-13 11:51:49 -070024#include <linux/fs_struct.h>
Cedric Le Goateracce2922007-07-15 23:40:59 -070025
Pavel Emelyanov61642812011-01-12 17:00:46 -080026static struct kmem_cache *user_ns_cachep __read_mostly;
Eric W. Biedermanf0d62ae2014-12-09 14:03:14 -060027static DEFINE_MUTEX(userns_state_mutex);
Pavel Emelyanov61642812011-01-12 17:00:46 -080028
Eric W. Biederman67080752013-04-14 13:47:02 -070029static bool new_idmap_permitted(const struct file *file,
30 struct user_namespace *ns, int cap_setid,
Eric W. Biederman22d917d2011-11-17 00:11:58 -080031 struct uid_gid_map *map);
Eric W. Biedermanb0321322016-07-30 13:53:37 -050032static void free_user_ns(struct work_struct *work);
Eric W. Biederman22d917d2011-11-17 00:11:58 -080033
Eric W. Biedermanb376c3e2016-08-08 13:41:24 -050034
/* Root @cred in @user_ns: reset securebits and capability sets so the
 * credentials are all-powerful inside the new namespace but carry
 * nothing over from the parent.
 */
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	/* Drop the request_key() authorisation key inherited from the
	 * parent credentials; it must not leak into the new namespace. */
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}
53
/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 *
 * Returns 0 on success or a negative errno:
 *   -EUSERS  nesting too deep or the per-user namespace limit was hit,
 *   -EPERM   caller is chrooted or has no mapping in the parent namespace,
 *   -ENOMEM  allocation or sysctl setup failure.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	/* Limit the nesting depth of user namespaces. */
	ret = -EUSERS;
	if (parent_ns->level > 32)
		goto fail;

	if (!inc_user_namespaces(parent_ns))
		goto fail;

	/*
	 * Verify that we can not violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	ret = -EPERM;
	if (current_chrooted())
		goto fail_dec;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	ret = -EPERM;
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		goto fail_dec;

	ret = -ENOMEM;
	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		goto fail_dec;

	ret = ns_alloc_inum(&ns->ns);
	if (ret)
		goto fail_free;
	ns->ns.ops = &userns_operations;

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	/* Deferred teardown; see __put_user_ns()/free_user_ns(). */
	INIT_WORK(&ns->work, free_user_ns);
	ns->max_user_namespaces = INT_MAX;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_PERSISTENT_KEYRINGS
	init_rwsem(&ns->persistent_keyring_register_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_free_inum(&ns->ns);
fail_free:
	kmem_cache_free(user_ns_cachep, ns);
fail_dec:
	dec_user_namespaces(parent_ns);
fail:
	return ret;
}
140
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -0700141int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
142{
143 struct cred *cred;
Oleg Nesterov61609682013-08-06 19:38:55 +0200144 int err = -ENOMEM;
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -0700145
146 if (!(unshare_flags & CLONE_NEWUSER))
147 return 0;
148
149 cred = prepare_creds();
Oleg Nesterov61609682013-08-06 19:38:55 +0200150 if (cred) {
151 err = create_user_ns(cred);
152 if (err)
153 put_cred(cred);
154 else
155 *new_cred = cred;
156 }
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -0700157
Oleg Nesterov61609682013-08-06 19:38:55 +0200158 return err;
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -0700159}
160
/* Workqueue callback that actually tears down a dead user namespace.
 * Frees @ns and then walks up the parent chain, freeing each ancestor
 * whose reference count drops to zero when its child's reference is
 * released by the loop's atomic_dec_and_test().
 */
static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

	do {
		parent = ns->parent;
		retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
		key_put(ns->persistent_keyring_register);
#endif
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		dec_user_namespaces(parent);
		ns = parent;
	} while (atomic_dec_and_test(&parent->count));
}
Eric W. Biedermanb0321322016-07-30 13:53:37 -0500178
/* Final reference to @ns has been dropped: queue ns->work so that
 * free_user_ns() runs from workqueue (process) context rather than
 * directly from the caller's context.
 */
void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);
Eric W. Biederman5c1469d2010-06-13 03:28:03 +0000184
/* Map the id range [@id, @id + @count - 1] down through @map.
 * The whole range must fall inside a single extent; on success the
 * lower id corresponding to @id is returned, otherwise (u32) -1.
 */
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx, extents;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();	/* pairs with the smp_wmb() in map_write() */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}
210
/* Map a single @id down through @map (upper id -> lower id).
 * Returns (u32) -1 when no extent covers @id.
 */
static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();	/* pairs with the smp_wmb() in map_write() */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}
233
/* Map a single @id up through @map (lower id -> upper id), i.e. the
 * inverse direction of map_id_down().  Returns (u32) -1 when no extent's
 * lower range covers @id.
 */
static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();	/* pairs with the smp_wmb() in map_write() */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].first;
	else
		id = (u32) -1;

	return id;
}
256
257/**
258 * make_kuid - Map a user-namespace uid pair into a kuid.
259 * @ns: User namespace that the uid is in
260 * @uid: User identifier
261 *
262 * Maps a user-namespace uid pair into a kernel internal kuid,
263 * and returns that kuid.
264 *
265 * When there is no mapping defined for the user-namespace uid
266 * pair INVALID_UID is returned. Callers are expected to test
Brian Campbellb080e042014-02-16 22:58:12 -0500267 * for and handle INVALID_UID being returned. INVALID_UID
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800268 * may be tested for using uid_valid().
269 */
270kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
271{
272 /* Map the uid to a global kernel uid */
273 return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
274}
275EXPORT_SYMBOL(make_kuid);
276
277/**
278 * from_kuid - Create a uid from a kuid user-namespace pair.
279 * @targ: The user namespace we want a uid in.
280 * @kuid: The kernel internal uid to start with.
281 *
282 * Map @kuid into the user-namespace specified by @targ and
283 * return the resulting uid.
284 *
285 * There is always a mapping into the initial user_namespace.
286 *
287 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
288 */
289uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
290{
291 /* Map the uid from a global kernel uid */
292 return map_id_up(&targ->uid_map, __kuid_val(kuid));
293}
294EXPORT_SYMBOL(from_kuid);
295
296/**
297 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
298 * @targ: The user namespace we want a uid in.
299 * @kuid: The kernel internal uid to start with.
300 *
301 * Map @kuid into the user-namespace specified by @targ and
302 * return the resulting uid.
303 *
304 * There is always a mapping into the initial user_namespace.
305 *
306 * Unlike from_kuid from_kuid_munged never fails and always
307 * returns a valid uid. This makes from_kuid_munged appropriate
308 * for use in syscalls like stat and getuid where failing the
309 * system call and failing to provide a valid uid are not an
310 * options.
311 *
312 * If @kuid has no mapping in @targ overflowuid is returned.
313 */
314uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
315{
316 uid_t uid;
317 uid = from_kuid(targ, kuid);
318
319 if (uid == (uid_t) -1)
320 uid = overflowuid;
321 return uid;
322}
323EXPORT_SYMBOL(from_kuid_munged);
324
325/**
326 * make_kgid - Map a user-namespace gid pair into a kgid.
327 * @ns: User namespace that the gid is in
Fabian Frederick68a9a432014-06-06 14:37:21 -0700328 * @gid: group identifier
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800329 *
330 * Maps a user-namespace gid pair into a kernel internal kgid,
331 * and returns that kgid.
332 *
333 * When there is no mapping defined for the user-namespace gid
334 * pair INVALID_GID is returned. Callers are expected to test
335 * for and handle INVALID_GID being returned. INVALID_GID may be
336 * tested for using gid_valid().
337 */
338kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
339{
340 /* Map the gid to a global kernel gid */
341 return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
342}
343EXPORT_SYMBOL(make_kgid);
344
345/**
346 * from_kgid - Create a gid from a kgid user-namespace pair.
347 * @targ: The user namespace we want a gid in.
348 * @kgid: The kernel internal gid to start with.
349 *
350 * Map @kgid into the user-namespace specified by @targ and
351 * return the resulting gid.
352 *
353 * There is always a mapping into the initial user_namespace.
354 *
355 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
356 */
357gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
358{
359 /* Map the gid from a global kernel gid */
360 return map_id_up(&targ->gid_map, __kgid_val(kgid));
361}
362EXPORT_SYMBOL(from_kgid);
363
364/**
365 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
366 * @targ: The user namespace we want a gid in.
367 * @kgid: The kernel internal gid to start with.
368 *
369 * Map @kgid into the user-namespace specified by @targ and
370 * return the resulting gid.
371 *
372 * There is always a mapping into the initial user_namespace.
373 *
374 * Unlike from_kgid from_kgid_munged never fails and always
375 * returns a valid gid. This makes from_kgid_munged appropriate
376 * for use in syscalls like stat and getgid where failing the
377 * system call and failing to provide a valid gid are not options.
378 *
379 * If @kgid has no mapping in @targ overflowgid is returned.
380 */
381gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
382{
383 gid_t gid;
384 gid = from_kgid(targ, kgid);
385
386 if (gid == (gid_t) -1)
387 gid = overflowgid;
388 return gid;
389}
390EXPORT_SYMBOL(from_kgid_munged);
391
Eric W. Biedermanf76d2072012-08-30 01:24:05 -0700392/**
393 * make_kprojid - Map a user-namespace projid pair into a kprojid.
394 * @ns: User namespace that the projid is in
395 * @projid: Project identifier
396 *
397 * Maps a user-namespace uid pair into a kernel internal kuid,
398 * and returns that kuid.
399 *
400 * When there is no mapping defined for the user-namespace projid
401 * pair INVALID_PROJID is returned. Callers are expected to test
402 * for and handle handle INVALID_PROJID being returned. INVALID_PROJID
403 * may be tested for using projid_valid().
404 */
405kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
406{
407 /* Map the uid to a global kernel uid */
408 return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
409}
410EXPORT_SYMBOL(make_kprojid);
411
412/**
413 * from_kprojid - Create a projid from a kprojid user-namespace pair.
414 * @targ: The user namespace we want a projid in.
415 * @kprojid: The kernel internal project identifier to start with.
416 *
417 * Map @kprojid into the user-namespace specified by @targ and
418 * return the resulting projid.
419 *
420 * There is always a mapping into the initial user_namespace.
421 *
422 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
423 */
424projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
425{
426 /* Map the uid from a global kernel uid */
427 return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
428}
429EXPORT_SYMBOL(from_kprojid);
430
431/**
432 * from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
433 * @targ: The user namespace we want a projid in.
434 * @kprojid: The kernel internal projid to start with.
435 *
436 * Map @kprojid into the user-namespace specified by @targ and
437 * return the resulting projid.
438 *
439 * There is always a mapping into the initial user_namespace.
440 *
441 * Unlike from_kprojid from_kprojid_munged never fails and always
442 * returns a valid projid. This makes from_kprojid_munged
443 * appropriate for use in syscalls like stat and where
444 * failing the system call and failing to provide a valid projid are
445 * not an options.
446 *
447 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
448 */
449projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
450{
451 projid_t projid;
452 projid = from_kprojid(targ, kprojid);
453
454 if (projid == (projid_t) -1)
455 projid = OVERFLOW_PROJID;
456 return projid;
457}
458EXPORT_SYMBOL(from_kprojid_munged);
459
460
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800461static int uid_m_show(struct seq_file *seq, void *v)
462{
463 struct user_namespace *ns = seq->private;
464 struct uid_gid_extent *extent = v;
465 struct user_namespace *lower_ns;
466 uid_t lower;
467
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700468 lower_ns = seq_user_ns(seq);
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800469 if ((lower_ns == ns) && lower_ns->parent)
470 lower_ns = lower_ns->parent;
471
472 lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
473
474 seq_printf(seq, "%10u %10u %10u\n",
475 extent->first,
476 lower,
477 extent->count);
478
479 return 0;
480}
481
482static int gid_m_show(struct seq_file *seq, void *v)
483{
484 struct user_namespace *ns = seq->private;
485 struct uid_gid_extent *extent = v;
486 struct user_namespace *lower_ns;
487 gid_t lower;
488
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700489 lower_ns = seq_user_ns(seq);
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800490 if ((lower_ns == ns) && lower_ns->parent)
491 lower_ns = lower_ns->parent;
492
493 lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
494
495 seq_printf(seq, "%10u %10u %10u\n",
496 extent->first,
497 lower,
498 extent->count);
499
500 return 0;
501}
502
Eric W. Biedermanf76d2072012-08-30 01:24:05 -0700503static int projid_m_show(struct seq_file *seq, void *v)
504{
505 struct user_namespace *ns = seq->private;
506 struct uid_gid_extent *extent = v;
507 struct user_namespace *lower_ns;
508 projid_t lower;
509
510 lower_ns = seq_user_ns(seq);
511 if ((lower_ns == ns) && lower_ns->parent)
512 lower_ns = lower_ns->parent;
513
514 lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
515
516 seq_printf(seq, "%10u %10u %10u\n",
517 extent->first,
518 lower,
519 extent->count);
520
521 return 0;
522}
523
Fabian Frederick68a9a432014-06-06 14:37:21 -0700524static void *m_start(struct seq_file *seq, loff_t *ppos,
525 struct uid_gid_map *map)
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800526{
527 struct uid_gid_extent *extent = NULL;
528 loff_t pos = *ppos;
529
530 if (pos < map->nr_extents)
531 extent = &map->extent[pos];
532
533 return extent;
534}
535
/* seq_file ->start() for the uid map; seq->private is the dumped ns. */
static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}
542
/* seq_file ->start() for the gid map; seq->private is the dumped ns. */
static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}
549
/* seq_file ->start() for the projid map; seq->private is the dumped ns. */
static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}
556
/* seq_file ->next(): advance the position and re-run ->start() there. */
static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}
562
/* seq_file ->stop(): nothing to release — ->start() hands out pointers
 * into the map itself, not allocated iterators.
 */
static void m_stop(struct seq_file *seq, void *v)
{
	return;
}
567
/* seq_file operations used to dump a namespace's uid map (see uid_m_show). */
const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};
574
/* seq_file operations used to dump a namespace's gid map (see gid_m_show). */
const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};
581
/* seq_file operations used to dump a namespace's projid map (see projid_m_show). */
const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};
588
Fabian Frederick68a9a432014-06-06 14:37:21 -0700589static bool mappings_overlap(struct uid_gid_map *new_map,
590 struct uid_gid_extent *extent)
Eric W. Biederman0bd14b42012-12-27 22:27:29 -0800591{
592 u32 upper_first, lower_first, upper_last, lower_last;
593 unsigned idx;
594
595 upper_first = extent->first;
596 lower_first = extent->lower_first;
597 upper_last = upper_first + extent->count - 1;
598 lower_last = lower_first + extent->count - 1;
599
600 for (idx = 0; idx < new_map->nr_extents; idx++) {
601 u32 prev_upper_first, prev_lower_first;
602 u32 prev_upper_last, prev_lower_last;
603 struct uid_gid_extent *prev;
604
605 prev = &new_map->extent[idx];
606
607 prev_upper_first = prev->first;
608 prev_lower_first = prev->lower_first;
609 prev_upper_last = prev_upper_first + prev->count - 1;
610 prev_lower_last = prev_lower_first + prev->count - 1;
611
612 /* Does the upper range intersect a previous extent? */
613 if ((prev_upper_first <= upper_last) &&
614 (prev_upper_last >= upper_first))
615 return true;
616
617 /* Does the lower range intersect a previous extent? */
618 if ((prev_lower_first <= lower_last) &&
619 (prev_lower_last >= lower_first))
620 return true;
621 }
622 return false;
623}
624
/* Parse and install an id map written to uid_map/gid_map/projid_map.
 * Each input line is "first lower_first count".  The lower ids are
 * translated through @parent_map into the kernel global id space before
 * @map is published.  A map may only ever be written once, and only
 * before any extents exist.
 */
static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent = NULL;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't have crazy
	 * architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Only allow < page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf)) {
		ret = PTR_ERR(kbuf);
		kbuf = NULL;
		goto out;
	}

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (; pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is not trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <=
		     extent->lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, extent))
			goto out;

		new_map.nr_extents++;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use user id's mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map: publish the extents before the count so that
	 * the lockless readers in map_id_*() (which smp_rmb() after
	 * loading nr_extents) never see uninitialized extents.
	 */
	memcpy(map->extent, new_map.extent,
	       new_map.nr_extents*sizeof(new_map.extent[0]));
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}
787
Fabian Frederick68a9a432014-06-06 14:37:21 -0700788ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
789 size_t size, loff_t *ppos)
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800790{
791 struct seq_file *seq = file->private_data;
792 struct user_namespace *ns = seq->private;
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700793 struct user_namespace *seq_ns = seq_user_ns(seq);
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800794
795 if (!ns->parent)
796 return -EPERM;
797
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700798 if ((seq_ns != ns) && (seq_ns != ns->parent))
799 return -EPERM;
800
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800801 return map_write(file, buf, size, ppos, CAP_SETUID,
802 &ns->uid_map, &ns->parent->uid_map);
803}
804
Fabian Frederick68a9a432014-06-06 14:37:21 -0700805ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
806 size_t size, loff_t *ppos)
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800807{
808 struct seq_file *seq = file->private_data;
809 struct user_namespace *ns = seq->private;
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700810 struct user_namespace *seq_ns = seq_user_ns(seq);
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800811
812 if (!ns->parent)
813 return -EPERM;
814
Eric W. Biedermanc450f372012-08-14 21:25:13 -0700815 if ((seq_ns != ns) && (seq_ns != ns->parent))
816 return -EPERM;
817
Eric W. Biederman22d917d2011-11-17 00:11:58 -0800818 return map_write(file, buf, size, ppos, CAP_SETGID,
819 &ns->gid_map, &ns->parent->gid_map);
820}
821
Fabian Frederick68a9a432014-06-06 14:37:21 -0700822ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
823 size_t size, loff_t *ppos)
Eric W. Biedermanf76d2072012-08-30 01:24:05 -0700824{
825 struct seq_file *seq = file->private_data;
826 struct user_namespace *ns = seq->private;
827 struct user_namespace *seq_ns = seq_user_ns(seq);
828
829 if (!ns->parent)
830 return -EPERM;
831
832 if ((seq_ns != ns) && (seq_ns != ns->parent))
833 return -EPERM;
834
835 /* Anyone can set any valid project id no capability needed */
836 return map_write(file, buf, size, ppos, -1,
837 &ns->projid_map, &ns->parent->projid_map);
838}
839
/*
 * new_idmap_permitted - check if the caller may install @new_map on @ns
 * @file:	the open uid_map/gid_map/projid_map file; its f_cred holds
 *		the credentials of the opener, checked as well as the writer
 * @ns:		the user namespace the mapping is being installed into
 * @cap_setid:	CAP_SETUID, CAP_SETGID, or an invalid cap (projid case)
 * @new_map:	the parsed mapping being proposed
 *
 * Returns true if installing the mapping grants nothing the caller
 * could not already do.
 */
static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;
	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 *
	 * An unprivileged single-id mapping is only acceptable when the
	 * opener's euid owns the namespace and the mapped id is the
	 * opener's own euid/egid in the parent namespace.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			/* An unprivileged gid mapping is only safe once
			 * setgroups() has been permanently disabled for
			 * this namespace; otherwise the writer could use
			 * setgroups() to drop a supplementary group and
			 * defeat group-deny permissions.
			 */
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege
	 * (cap_setid is -1 for projid maps).
	 */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}
Pavel Emelyanov61642812011-01-12 17:00:46 -0800877
Eric W. Biederman9cc46512014-12-02 12:27:26 -0600878int proc_setgroups_show(struct seq_file *seq, void *v)
879{
880 struct user_namespace *ns = seq->private;
881 unsigned long userns_flags = ACCESS_ONCE(ns->flags);
882
883 seq_printf(seq, "%s\n",
884 (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
885 "allow" : "deny");
886 return 0;
887}
888
/*
 * proc_setgroups_write - handle a write to /proc/<pid>/setgroups
 *
 * Accepts exactly "allow" or "deny" (optionally followed by
 * whitespace).  "deny" permanently clears USERNS_SETGROUPS_ALLOWED;
 * "allow" is only accepted while the flag is still set, so the
 * transition is one-way.  Returns @count on success, -EINVAL on bad
 * input, -EFAULT on copy failure, -EPERM on a forbidden transition.
 */
ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written:
	 * the whole value in a single write at offset 0, short enough
	 * to fit kbuf with its terminating NUL.
	 */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is not trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	/* The flag and the gid map are read/written together under
	 * userns_state_mutex to keep their interlock consistent.
	 */
	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
955
Eric W. Biederman273d2c62014-12-05 18:01:11 -0600956bool userns_may_setgroups(const struct user_namespace *ns)
957{
958 bool allowed;
959
Eric W. Biedermanf0d62ae2014-12-09 14:03:14 -0600960 mutex_lock(&userns_state_mutex);
Eric W. Biederman273d2c62014-12-05 18:01:11 -0600961 /* It is not safe to use setgroups until a gid mapping in
962 * the user namespace has been established.
963 */
964 allowed = ns->gid_map.nr_extents != 0;
Eric W. Biederman9cc46512014-12-02 12:27:26 -0600965 /* Is setgroups allowed? */
966 allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
Eric W. Biedermanf0d62ae2014-12-09 14:03:14 -0600967 mutex_unlock(&userns_state_mutex);
Eric W. Biederman273d2c62014-12-05 18:01:11 -0600968
969 return allowed;
970}
971
Seth Forsheed07b8462015-09-23 15:16:04 -0500972/*
973 * Returns true if @ns is the same namespace as or a descendant of
974 * @target_ns.
975 */
976bool current_in_userns(const struct user_namespace *target_ns)
977{
978 struct user_namespace *ns;
979 for (ns = current_user_ns(); ns; ns = ns->parent) {
980 if (ns == target_ns)
981 return true;
982 }
983 return false;
984}
985
/* Convert a generic ns_common handle back to its containing user_namespace. */
static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}
990
Al Viro64964522014-11-01 00:37:32 -0400991static struct ns_common *userns_get(struct task_struct *task)
Eric W. Biedermancde19752012-07-26 06:24:06 -0700992{
993 struct user_namespace *user_ns;
994
995 rcu_read_lock();
996 user_ns = get_user_ns(__task_cred(task)->user_ns);
997 rcu_read_unlock();
998
Al Viro3c041182014-11-01 00:25:30 -0400999 return user_ns ? &user_ns->ns : NULL;
Eric W. Biedermancde19752012-07-26 06:24:06 -07001000}
1001
/* Drop the reference taken by userns_get(). */
static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}
1006
/*
 * userns_install - move the current task into @ns (setns(CLONE_NEWUSER))
 *
 * Returns 0 on success or a negative errno.  On success the task's
 * credentials are replaced with a copy whose user_ns is @ns.
 */
static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	/* The task's fs_struct must not be shared with another task. */
	if (current->fs->users != 1)
		return -EINVAL;

	/* The caller needs CAP_SYS_ADMIN in the target namespace. */
	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	/* Swap the copied cred's namespace reference: drop the old one,
	 * take a new reference on the target, then commit atomically.
	 */
	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
1037
/* nsfs operations backing /proc/<pid>/ns/user. */
const struct proc_ns_operations userns_operations = {
	.name		= "user",
	.type		= CLONE_NEWUSER,
	.get		= userns_get,
	.put		= userns_put,
	.install	= userns_install,
};
1045
/* Create the slab cache for struct user_namespace; panics on failure
 * (SLAB_PANIC), so no error path is needed.
 */
static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);