Paul Mundt | cbf6b1b | 2010-01-12 19:01:11 +0900 | [diff] [blame] | 1 | #include <linux/mm.h> |
| 2 | #include <linux/kernel.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 3 | #include <linux/slab.h> |
Ingo Molnar | 174cd4b | 2017-02-02 19:15:33 +0100 | [diff] [blame] | 4 | #include <linux/sched/signal.h> |
Ingo Molnar | 68db0cf | 2017-02-08 18:51:37 +0100 | [diff] [blame] | 5 | #include <linux/sched/task_stack.h> |
Filippo Arcidiacono | 5d920bb | 2012-04-19 15:45:57 +0900 | [diff] [blame] | 6 | #include <linux/export.h> |
| 7 | #include <linux/stackprotector.h> |
Paul Mundt | 936c163 | 2012-05-24 13:03:20 +0900 | [diff] [blame] | 8 | #include <asm/fpu.h> |
Ingo Molnar | 4cf421e | 2017-02-03 10:03:42 +0100 | [diff] [blame] | 9 | #include <asm/ptrace.h> |
Paul Mundt | cbf6b1b | 2010-01-12 19:01:11 +0900 | [diff] [blame] | 10 | |
/* Slab cache for per-task extended (FPU) state; NULL when xstate_size == 0. */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of the xstate area for this CPU; 0 if no FPU (hard or soft). */
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value compared in function epilogues by -fstack-protector. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
| 18 | |
Suresh Siddha | 55ccf3f | 2012-05-16 15:03:51 -0700 | [diff] [blame] | 19 | /* |
| 20 | * this gets called so that we can store lazy state into memory and copy the |
| 21 | * current task into the new thread. |
| 22 | */ |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 23 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
| 24 | { |
Suresh Siddha | 55ccf3f | 2012-05-16 15:03:51 -0700 | [diff] [blame] | 25 | #ifdef CONFIG_SUPERH32 |
| 26 | unlazy_fpu(src, task_pt_regs(src)); |
| 27 | #endif |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 28 | *dst = *src; |
| 29 | |
| 30 | if (src->thread.xstate) { |
| 31 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, |
| 32 | GFP_KERNEL); |
| 33 | if (!dst->thread.xstate) |
| 34 | return -ENOMEM; |
| 35 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); |
| 36 | } |
| 37 | |
| 38 | return 0; |
| 39 | } |
| 40 | |
| 41 | void free_thread_xstate(struct task_struct *tsk) |
| 42 | { |
| 43 | if (tsk->thread.xstate) { |
| 44 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); |
| 45 | tsk->thread.xstate = NULL; |
| 46 | } |
| 47 | } |
| 48 | |
/*
 * Arch hook invoked when a task_struct is finally freed: drop the
 * per-task xstate allocation that arch_dup_task_struct() made.
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
| 53 | |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 54 | void arch_task_cache_init(void) |
| 55 | { |
| 56 | if (!xstate_size) |
| 57 | return; |
| 58 | |
| 59 | task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, |
| 60 | __alignof__(union thread_xstate), |
| 61 | SLAB_PANIC | SLAB_NOTRACK, NULL); |
| 62 | } |
| 63 | |
| 64 | #ifdef CONFIG_SH_FPU_EMU |
| 65 | # define HAVE_SOFTFP 1 |
| 66 | #else |
| 67 | # define HAVE_SOFTFP 0 |
| 68 | #endif |
| 69 | |
Paul Gortmaker | 4603f53 | 2013-06-18 17:10:12 -0400 | [diff] [blame] | 70 | void init_thread_xstate(void) |
Paul Mundt | 0ea820c | 2010-01-13 12:51:40 +0900 | [diff] [blame] | 71 | { |
| 72 | if (boot_cpu_data.flags & CPU_HAS_FPU) |
| 73 | xstate_size = sizeof(struct sh_fpu_hard_struct); |
| 74 | else if (HAVE_SOFTFP) |
| 75 | xstate_size = sizeof(struct sh_fpu_soft_struct); |
| 76 | else |
| 77 | xstate_size = 0; |
| 78 | } |