Paul Mundt | cbf6b1b | 2010-01-12 19:01:11 +0900 | [diff] [blame] | 1 | #include <linux/mm.h> |
| 2 | #include <linux/kernel.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 3 | #include <linux/slab.h> |
Paul Mundt | cbf6b1b | 2010-01-12 19:01:11 +0900 | [diff] [blame] | 4 | #include <linux/sched.h> |
Filippo Arcidiacono | 5d920bb | 2012-04-19 15:45:57 +0900 | [diff] [blame] | 5 | #include <linux/export.h> |
| 6 | #include <linux/stackprotector.h> |
Paul Mundt | 936c163 | 2012-05-24 13:03:20 +0900 | [diff] [blame] | 7 | #include <asm/fpu.h> |
Paul Mundt | cbf6b1b | 2010-01-12 19:01:11 +0900 | [diff] [blame] | 8 | |
/*
 * Slab cache backing per-task extended FPU state (union thread_xstate).
 * Created in arch_task_cache_init() only when xstate_size != 0.
 */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of the per-task xstate area; 0 means no FPU state at all. */
unsigned int xstate_size;
| 11 | |
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Stack-protector canary value compared by compiler-emitted prologue/
 * epilogue code; exported so modules built with -fstack-protector link.
 */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
| 16 | |
Suresh Siddha | 55ccf3f | 2012-05-16 15:03:51 -0700 | [diff] [blame] | 17 | /* |
| 18 | * this gets called so that we can store lazy state into memory and copy the |
| 19 | * current task into the new thread. |
| 20 | */ |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 21 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
| 22 | { |
Suresh Siddha | 55ccf3f | 2012-05-16 15:03:51 -0700 | [diff] [blame] | 23 | #ifdef CONFIG_SUPERH32 |
| 24 | unlazy_fpu(src, task_pt_regs(src)); |
| 25 | #endif |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 26 | *dst = *src; |
| 27 | |
| 28 | if (src->thread.xstate) { |
| 29 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, |
| 30 | GFP_KERNEL); |
| 31 | if (!dst->thread.xstate) |
| 32 | return -ENOMEM; |
| 33 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); |
| 34 | } |
| 35 | |
| 36 | return 0; |
| 37 | } |
| 38 | |
| 39 | void free_thread_xstate(struct task_struct *tsk) |
| 40 | { |
| 41 | if (tsk->thread.xstate) { |
| 42 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); |
| 43 | tsk->thread.xstate = NULL; |
| 44 | } |
| 45 | } |
| 46 | |
/*
 * Arch hook called when a task_struct is finally freed; drop the
 * per-task xstate buffer so it doesn't leak with the task.
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
| 51 | |
/*
 * Create the slab cache for per-task xstate. Skipped entirely when
 * xstate_size is 0 (no hardware FPU and no soft-FP emulation), in which
 * case task_xstate_cachep stays NULL and is never allocated from.
 * SLAB_PANIC: cache creation failure at boot is fatal, no error path.
 */
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}
| 61 | |
/*
 * Compile-time 0/1 constant so init_thread_xstate() can test FP-emulation
 * support with a plain C conditional instead of #ifdef in the function body.
 */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
| 67 | |
Paul Gortmaker | 4603f53 | 2013-06-18 17:10:12 -0400 | [diff] [blame] | 68 | void init_thread_xstate(void) |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 69 | { |
| 70 | if (boot_cpu_data.flags & CPU_HAS_FPU) |
| 71 | xstate_size = sizeof(struct sh_fpu_hard_struct); |
| 72 | else if (HAVE_SOFTFP) |
| 73 | xstate_size = sizeof(struct sh_fpu_soft_struct); |
| 74 | else |
| 75 | xstate_size = 0; |
| 76 | } |