/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
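
/*
 * Worked example (a sketch, assuming UIDHASH_BITS == 7, i.e. the common
 * !CONFIG_BASE_SMALL case, so UIDHASH_SZ == 128 and UIDHASH_MASK == 127):
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *			  == (7 + 1000) & 127
 *			  == 111
 *
 * Folding the high UID bits back into the low ones spreads block-allocated
 * UID ranges across buckets instead of relying on the low bits alone.
 */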

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
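
/*
 * Illustrative sketch of the rule above: because free_uid() may run with
 * interrupts already off, the hash is always protected with the irq-safe
 * lock variants, never spin_lock_bh():
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... touch the hash table ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * This pattern is safe from process, softirq and irqs-off contexts alike.
 */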

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
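
/*
 * Typical lookup/put pairing (a usage sketch; the uid value is made up):
 *
 *	struct user_struct *u = find_user(1000);
 *
 *	if (u) {
 *		... inspect u->processes, u->sigpending, etc ...
 *		free_uid(u);		(drops the reference find_user() took)
 *	}
 */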

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Optimistic fast path: the uid is usually in the hash already. */
	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* We raced: another task already inserted this uid.
			 * Free our copy, dropping the namespace reference
			 * taken by get_user_ns() above.
			 */
			put_user_ns(ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
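
/*
 * Usage sketch, modelled loosely on the set_user()/setuid() path in
 * kernel/sys.c (details elided): callers find-or-create the target user
 * and must balance the reference with free_uid():
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current_user_ns(), uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(new_user);
 */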

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);