/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs the scanning of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pspace.h>

#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static kmem_cache_t *pid_cachep;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS 300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)

static inline int mk_pid(struct pspace *pspace, struct pidmap *map, int off)
{
        return (map - pspace->pidmap)*BITS_PER_PAGE + off;
}
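
/*
 * Worked example (assuming 4 KiB pages, so BITS_PER_PAGE == 32768):
 * bit offset 5 on the second bitmap page (map - pspace->pidmap == 1)
 * maps to pid 1*32768 + 5 == 32773.
 */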

#define find_next_offset(map, off) \
        find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pspace init_pspace = {
        .pidmap = {
                [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
        },
        .last_pid = 0
};
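
/*
 * Sizing sketch (assuming 4 KiB pages, and that PIDMAP_ENTRIES in
 * pspace.h is sized for the 4-million-PID limit): BITS_PER_PAGE is
 * 32768, so covering the full space takes 4194304/32768 == 128 pidmap
 * entries, each backed by one lazily allocated bitmap page.
 */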

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(struct pspace *pspace, int pid)
{
        struct pidmap *map = pspace->pidmap + pid / BITS_PER_PAGE;
        int offset = pid & BITS_PER_PAGE_MASK;

        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
}
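
/*
 * Freeing really is the O(1) path promised in the header comment:
 * clear the bit and bump the per-page free count, with no lock taken.
 * clear_bit() is atomic, and allocators re-check ownership with
 * test_and_set_bit(), so no further synchronization is needed.
 */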

static int alloc_pidmap(struct pspace *pspace)
{
        int i, offset, max_scan, pid, last = pspace->last_pid;
        struct pidmap *map;

        pid = last + 1;
        if (pid >= pid_max)
                pid = RESERVED_PIDS;
        offset = pid & BITS_PER_PAGE_MASK;
        map = &pspace->pidmap[pid/BITS_PER_PAGE];
        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
        for (i = 0; i <= max_scan; ++i) {
                if (unlikely(!map->page)) {
                        void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irq(&pidmap_lock);
                        if (map->page)
                                kfree(page);
                        else
                                map->page = page;
                        spin_unlock_irq(&pidmap_lock);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->nr_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->nr_free);
                                        pspace->last_pid = pid;
                                        return pid;
                                }
                                offset = find_next_offset(map, offset);
                                pid = mk_pid(pspace, map, offset);
                        /*
                         * find_next_offset() found a bit, the pid from it
                         * is in-bounds, and if we fell back to the last
                         * bitmap block and the final block was the same
                         * as the starting point, pid is before last_pid.
                         */
                        } while (offset < BITS_PER_PAGE && pid < pid_max &&
                                        (i != max_scan || pid < last ||
                                            !((last+1) & BITS_PER_PAGE_MASK)));
                }
                if (map < &pspace->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &pspace->pidmap[0];
                        offset = RESERVED_PIDS;
                        if (unlikely(last == offset))
                                break;
                }
                pid = mk_pid(pspace, map, offset);
        }
        return -1;
}
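
/*
 * A sketch of typical use (see alloc_pid() below for the real caller):
 *
 *      nr = alloc_pidmap(&init_pspace);
 *      if (nr < 0)
 *              ... fail, the pid space is exhausted ...
 *
 * On success the returned pid stays reserved until free_pidmap();
 * after a wrap-around, pids below RESERVED_PIDS are skipped, the idea
 * being to leave the low numbers to long-running system processes.
 */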

static int next_pidmap(struct pspace *pspace, int last)
{
        int offset;
        struct pidmap *map;

        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pspace->pidmap[(last + 1)/BITS_PER_PAGE];
        for (; map < &pspace->pidmap[PIDMAP_ENTRIES]; map++, offset = 0) {
                if (unlikely(!map->page))
                        continue;
                offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
                if (offset < BITS_PER_PAGE)
                        return mk_pid(pspace, map, offset);
        }
        return -1;
}
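
/*
 * next_pidmap() returns the smallest allocated pid strictly greater
 * than last, or -1 once the bitmap is exhausted; pages the allocator
 * never touched (page == NULL) are skipped wholesale.
 */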

fastcall void put_pid(struct pid *pid)
{
        if (!pid)
                return;
        if ((atomic_read(&pid->count) == 1) ||
             atomic_dec_and_test(&pid->count))
                kmem_cache_free(pid_cachep, pid);
}
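
/*
 * The atomic_read() above is a fast path: if the count is already 1,
 * ours is the last reference, so the more expensive
 * atomic_dec_and_test() can be skipped and the pid freed directly.
 */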
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        hlist_del_rcu(&pid->pid_chain);
        spin_unlock_irqrestore(&pidmap_lock, flags);

        free_pidmap(&init_pspace, pid->nr);
        call_rcu(&pid->rcu, delayed_put_pid);
}
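
/*
 * Note the lifecycle split: the pid is unhashed and its number goes
 * back to the bitmap immediately, but the reference free_pid() holds
 * is only put after an RCU grace period (via delayed_put_pid), so a
 * concurrent lockless find_pid() never walks into freed memory.
 */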

struct pid *alloc_pid(void)
{
        struct pid *pid;
        enum pid_type type;
        int nr = -1;

        pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        nr = alloc_pidmap(&init_pspace);
        if (nr < 0)
                goto out_free;

        atomic_set(&pid->count, 1);
        pid->nr = nr;
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        spin_lock_irq(&pidmap_lock);
        hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_free:
        kmem_cache_free(pid_cachep, pid);
        pid = NULL;
        goto out;
}
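
/*
 * Expected call pattern (copy_process() is the main user at this point
 * in the tree): allocate first, attach afterwards:
 *
 *      struct pid *pid = alloc_pid();
 *      if (!pid)
 *              ... fork fails with -EAGAIN ...
 *      ...
 *      attach_pid(p, PIDTYPE_PID, pid->nr);
 */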

struct pid * fastcall find_pid(int nr)
{
        struct hlist_node *elem;
        struct pid *pid;

        hlist_for_each_entry_rcu(pid, elem,
                        &pid_hash[pid_hashfn(nr)], pid_chain) {
                if (pid->nr == nr)
                        return pid;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(find_pid);

int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
        struct pid_link *link;
        struct pid *pid;

        link = &task->pids[type];
        link->pid = pid = find_pid(nr);
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);

        return 0;
}
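
/*
 * attach_pid() assumes nr was already allocated: find_pid() must
 * succeed, since the NULL case is not handled. Callers at this point
 * in the tree invoke it with the tasklist_lock write-acquired.
 */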

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid_link *link;
        struct pid *pid;
        int tmp;

        link = &task->pids[type];
        pid = link->pid;

        hlist_del_rcu(&link->node);
        link->pid = NULL;

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
                        return;

        free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        new->pids[type].pid = old->pids[type].pid;
        hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
        old->pids[type].pid = NULL;
}
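
/*
 * The lone caller at this point in the tree is de_thread() during
 * exec. hlist_replace_rcu() swaps the task link in one shot, so RCU
 * walkers of pid->tasks[type] see either the old task or the new one,
 * never an empty list in between.
 */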

struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference(pid->tasks[type].first);
                if (first)
                        result = hlist_entry(first, struct task_struct, pids[(type)].node);
        }
        return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
        return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_pid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
        struct pid *pid;

        do {
                pid = find_pid(nr);
                if (pid)
                        break;
                nr = next_pidmap(&init_pspace, nr);
        } while (nr > 0);

        return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
        int i, pidhash_size;
        unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

        pidhash_shift = max(4, fls(megabytes * 4));
        pidhash_shift = min(12, pidhash_shift);
        pidhash_size = 1 << pidhash_shift;
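
        /*
         * Worked example: with 256 MB of kernel pages, megabytes == 256,
         * fls(256 * 4) == 11, so pidhash_shift == 11 and the table gets
         * 1 << 11 == 2048 buckets; the max()/min() clamps keep the table
         * between 16 and 4096 buckets.
         */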

        printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
                pidhash_size, pidhash_shift,
                pidhash_size * sizeof(struct hlist_head));

        pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
        if (!pid_hash)
                panic("Could not alloc pidhash!\n");
        for (i = 0; i < pidhash_size; i++)
                INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
        init_pspace.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        /* Reserve PID 0. We never call free_pidmap(0) */
        set_bit(0, init_pspace.pidmap[0].page);
        atomic_dec(&init_pspace.pidmap[0].nr_free);

        pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
                                        __alignof__(struct pid),
                                        SLAB_PANIC, NULL, NULL);
}