/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one out of 1 million possible PIDs
 * are already allocated, is a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
	find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

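/*
 * is_container_init - check whether a task is the "init" of its pid namespace
 *
 * Returns 1 if @tsk's pid is number 1 in the namespace it was allocated
 * in (i.e. the task is that namespace's init), 0 otherwise.
 */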
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

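/*
 * free_pidmap - release the bitmap slot used by @pid in @pid_ns
 *
 * Clears the bit for @pid in its bitmap page and bumps the page's free
 * count. Lockless; pairs with the test_and_set_bit() in alloc_pidmap().
 */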
static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

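/*
 * alloc_pidmap - allocate a free pid number from @pid_ns's bitmaps
 *
 * The search starts just after the namespace's last_pid and wraps around
 * to RESERVED_PIDS, allocating bitmap pages on demand. Returns the new
 * pid number, or -1 if the pid space is exhausted.
 */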
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

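/*
 * next_pidmap - find the first pid number greater than @last in @pid_ns
 *
 * Scans the bitmap pages for the next allocated pid and returns its
 * number, or -1 if there is none.
 */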
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

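/*
 * put_pid - drop a reference to a struct pid
 *
 * When the last reference goes away the pid is returned to its
 * namespace's kmem cache and the namespace reference taken in
 * alloc_pid() is dropped.
 */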
fastcall void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

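/*
 * free_pid - unhash a struct pid and release its pid numbers
 *
 * Removes every upid from the pid hash, frees each namespace's bitmap
 * slot, and drops the initial reference after an RCU grace period so
 * that concurrent lockless lookups stay safe.
 */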
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}

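/*
 * alloc_pid - allocate a new struct pid in namespace @ns
 *
 * A pid gets one number per namespace level, allocated from @ns up to
 * and including the init namespace, and each (nr, ns) pair is hashed so
 * it can be looked up with find_pid_ns(). Returns NULL on failure.
 */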
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	pid->nr = pid->numbers[0].nr;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

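/*
 * find_pid_ns - look up a struct pid by number as seen in namespace @ns
 *
 * Walks the hash chain for (nr, ns) with the RCU-safe iterator; callers
 * hold rcu_read_lock() or the tasklist_lock to keep the result valid.
 */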
struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

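/*
 * detach_pid - unlink @task from the pid it uses for @type
 *
 * If no task of any type remains attached to that pid afterwards, the
 * pid itself is freed.
 */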
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

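/*
 * pid_task - return the first task attached to @pid for @type, or NULL
 *
 * The task list head is read via rcu_dereference(), so callers need
 * rcu_read_lock() or the tasklist_lock to keep the result stable.
 */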
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

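/*
 * get_task_pid - get a reference to the pid @task uses for @type
 *
 * The lookup runs under rcu_read_lock() so the pid_link stays valid
 * while the reference is taken.
 */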
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

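/*
 * get_pid_task - get a reference to the task attached to @pid for @type
 *
 * Returns the task with its reference count raised, or NULL if nothing
 * is attached.
 */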
struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

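/*
 * find_get_pid - look up a pid by number and take a reference to it
 *
 * The number is resolved with find_vpid(), i.e. in the caller's pid
 * namespace. Returns NULL if no such pid exists.
 */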
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}

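/*
 * pid_nr_ns - return the pid number of @pid as seen from namespace @ns
 *
 * Returns 0 if @pid is not visible in @ns, i.e. if @ns is deeper than
 * the pid's own namespace or lies on a different branch of the hierarchy.
 */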
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

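/*
 * One kmem cache per distinct number of numerical ids a struct pid has
 * to carry: pids needing the same number of ids share a cache. Caches
 * are created on demand and kept on pid_caches_lh.
 */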
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry (pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			/* FIXME add numerical ids here */
			sizeof(struct pid), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

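/*
 * copy_pid_ns - "copy" a pid namespace across clone/unshare
 *
 * No new namespace is created here: the old namespace is pinned with an
 * extra reference and returned as-is.
 */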
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	BUG_ON(!old_ns);
	get_pid_ns(old_ns);
	return old_ns;
}

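/*
 * free_pid_ns - kref release callback that frees a pid namespace
 */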
void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	kfree(ns);
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = create_pid_cachep(1);
	if (init_pid_ns.pid_cachep == NULL)
		panic("Can't create pid_1 cachep\n");
}