Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #include <linux/mm.h> |
| 2 | #include <linux/module.h> |
| 3 | #include <linux/sched.h> |
| 4 | #include <linux/init.h> |
| 5 | #include <linux/init_task.h> |
| 6 | #include <linux/fs.h> |
| 7 | #include <linux/mqueue.h> |
| 8 | |
| 9 | #include <asm/uaccess.h> |
| 10 | #include <asm/pgtable.h> |
| 11 | #include <asm/desc.h> |
| 12 | |
/*
 * Statically-allocated resource structures for the initial (boot) task.
 * Each INIT_* macro expands to a full initializer; the macros that take
 * an argument are passed the variable's own name so the initializer can
 * refer back to the object being defined (self-referencing fields).
 * The first four are file-local; only this file wires them into the
 * initial task via the INIT_TASK() initializer below.
 */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/* The initial address space; exported so modules can reference it. */
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
| 19 | |
/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry: the ".data.init_task" section placement
 * below lets the linker script align and position this object, since a
 * plain C definition cannot guarantee THREAD_SIZE alignment by itself.
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };
| 30 | |
/*
 * Initial task structure.
 *
 * This is the only task_struct that is defined statically; all other
 * task structs will be allocated on slabs in fork.c.  Exported because
 * out-of-tree/module code references the initial task.
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
| 38 | |
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 * DEFINE_PER_CPU_SHARED_ALIGNED provides that per-CPU, cacheline-
 * aligned placement; INIT_TSS supplies the initial contents.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 47 | |