/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
        .kref = {
                .refcount = ATOMIC_INIT(2),
        },
        .root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

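/*
 * Fold the bits above UIDHASH_BITS back onto the low bits so that
 * uids differing only in their high bits still spread across the
 * table, then mask the sum down to a valid bucket index.
 */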
#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

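/*
 * init runs as root, so root_user starts life with one reference
 * and one process already accounted to it.
 */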
struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
#ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group(&root_task_group);
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

static void sched_switch_user(struct task_struct *p)
{
        sched_move_task(p);
}

#else /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif /* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
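/*
 * uids_mutex serializes creation and removal of the per-uid sysfs
 * directories against concurrent alloc_uid() and free_uid() callers.
 */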
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long shares;
        int rc;

        sscanf(buf, "%lu", &shares);

        rc = sched_group_set_shares(up->tg, shares);

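        /* a sysfs ->store() returns bytes consumed on success or a negative errno */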
        return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
        __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        long rt_runtime;
        int rc;

        sscanf(buf, "%ld", &rt_runtime);

        rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
        __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_period;
        int rc;

        sscanf(buf, "%lu", &rt_period);

        rc = sched_group_set_rt_period(up->tg, rt_period);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
        __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        &cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        &cpu_rt_runtime_attr.attr,
        &cpu_rt_period_attr.attr,
#endif
        NULL
};

/* the user_struct's lifetime is not managed by the kobject core (for now),
 * so there is nothing for the release callback to do */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = uids_attributes,
        .release        = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
        struct kobject *kobj = &up->kobj;
        int error;

        memset(kobj, 0, sizeof(struct kobject));
        kobj->kset = uids_kset;
        error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
        if (error) {
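                /* kobject_init_and_add() leaves a reference held even on
                 * failure; drop it before bailing out */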
                kobject_put(kobj);
                goto done;
        }

        kobject_uevent(kobj, KOBJ_ADD);
done:
        return error;
}

/* create these entries in sysfs:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for the root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for the root user)
 */
int __init uids_sysfs_init(void)
{
        uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
        if (!uids_kset)
                return -ENOMEM;

        return uids_user_create(&root_user);
}

/* work function to remove the sysfs directory for a user and free up
 * the corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

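        /*
         * free_user() restored the reference it was about to drop before
         * scheduling this work; drop it again here, and only tear the
         * user down if it was the last reference.
         */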
        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        kobject_uevent(&up->kobj, KOBJ_REMOVE);
        kobject_del(&up->kobj);
        kobject_put(&up->kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        /* restore the count */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

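        /*
         * Defer the sysfs teardown to process context: removing the
         * kobject can sleep, while free_uid() may be called with
         * interrupts disabled (see the uidhash_lock comment above).
         */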
        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;

        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                if (sched_create_user(new) < 0)
                        goto out_free_user;

                if (uids_user_create(new))
                        goto out_destroy_sched;

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already.
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence there is no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;

out_destroy_sched:
        sched_destroy_user(new);
out_free_user:
        kmem_cache_free(uid_cachep, new);
out_unlock:
        uids_mutex_unlock();
        return NULL;
}


void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over its NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        sched_switch_user(current);

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user). If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
        int i;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *nd;

        spin_lock_irqsave(&uidhash_lock, flags);
        /*
         * Collapse the chains: the user_structs stay alive but are
         * no longer in the hashes; a subsequent free_uid() will free
         * them.
         */
        for (i = 0; i < UIDHASH_SZ; i++) {
                head = ns->uidhash_table + i;
                while (!hlist_empty(head)) {
                        nd = head->first;
                        hlist_del_init(nd);
                }
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        free_uid(ns->root_user);
}
#endif

static int __init uid_cache_init(void)
{
        int n;

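        /* SLAB_PANIC: a failed cache creation panics, so no NULL check is needed */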
        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);