// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/process.c
 *
 * This file handles the architecture-dependent parts of process handling.
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *		    Copyright (C) 2006  Lineo Solutions Inc. support SH4A UBC
 *		    Copyright (C) 2002 - 2008  Paul Mundt
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/prefetch.h>
#include <linux/stackprotector.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
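
/*
 * Dump the CPU register state (PC, PR, SR, the general registers,
 * MACH/MACL and GBR) together with a stack trace and a dump of the code
 * around PC. Typically invoked from the die()/oops path.
 */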
void show_regs(struct pt_regs *regs)
{
	printk("\n");
	show_regs_print_info(KERN_DEFAULT);

	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("PR is at %pS\n", (void *)regs->pr);

	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	printk("TEA : %08x\n", __raw_readl(MMU_TEA));
#else
	printk("\n");
#endif

	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
	       regs->regs[0], regs->regs[1],
	       regs->regs[2], regs->regs[3]);
	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
	       regs->regs[4], regs->regs[5],
	       regs->regs[6], regs->regs[7]);
	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8], regs->regs[9],
	       regs->regs[10], regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12], regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
	show_code(regs);
}
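
/*
 * Set up the user-space register state for a freshly exec'd task: clear
 * PR, start with the FPU disabled (SR_FD), and point PC and SP at the
 * new image's entry point and stack. Any extended FPU state left over
 * from the previous image is freed.
 */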
void start_thread(struct pt_regs *regs, unsigned long new_pc,
		  unsigned long new_sp)
{
	regs->pr = 0;
	regs->sr = SR_FD;
	regs->pc = new_pc;
	regs->regs[15] = new_sp;

	free_thread_xstate(current);
}
EXPORT_SYMBOL(start_thread);
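
/*
 * Reset per-thread state on exec: drop any ptrace hardware breakpoints
 * and, when the FPU is configured, forget the lazily saved FPU context.
 */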
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

#if defined(CONFIG_SH_FPU)
	/* Forget lazy FPU state */
	clear_fpu(tsk, task_pt_regs(tsk));
	clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL, 0,
				      sizeof(struct user_fpu_struct),
				      fpu, NULL);
#endif

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
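
/*
 * Set up the thread structure and kernel-stack pt_regs for a new task.
 * Kernel threads get a zeroed register frame and return through
 * ret_from_kernel_thread; user forks inherit the parent's registers,
 * optionally with a new user stack pointer and TLS (GBR), and return
 * through ret_from_fork with r0 = 0 in the child.
 */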
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;

#if defined(CONFIG_SH_DSP)
	struct task_struct *tsk = current;

	if (is_dsp_enabled(tsk)) {
		/*
		 * We could either call __save_dsp() or just copy the struct:
		 *	__save_dsp(p);
		 *	p->thread.dsp_status.status |= SR_DSP
		 */
		p->thread.dsp_status = tsk->thread.dsp_status;
	}
#endif

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		childregs->regs[4] = arg;
		childregs->regs[5] = usp;
		childregs->sr = SR_MD;
#if defined(CONFIG_SH_FPU)
		childregs->sr |= SR_FD;
#endif
		ti->addr_limit = KERNEL_DS;
		ti->status &= ~TS_USEDFPU;
		p->thread.fpu_counter = 0;
		return 0;
	}
	*childregs = *current_pt_regs();

	if (usp)
		childregs->regs[15] = usp;
	ti->addr_limit = USER_DS;

	if (clone_flags & CLONE_SETTLS)
		childregs->gbr = childregs->regs[0];

	childregs->regs[0] = 0; /* Set return value for child */
	p->thread.pc = (unsigned long) ret_from_fork;
	return 0;
}

/*
 * switch_to(x, y) should switch tasks from x to y.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
	struct thread_struct *next_t = &next->thread;

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	__stack_chk_guard = next->stack_canary;
#endif

	unlazy_fpu(prev, task_pt_regs(prev));

	/* we're going to use this soon, after a few expensive things */
	if (next->thread.fpu_counter > 5)
		prefetch(next_t->xstate);

#ifdef CONFIG_MMU
	/*
	 * Restore the kernel mode register
	 *	k7 (r7_bank1)
	 */
	asm volatile("ldc	%0, r7_bank"
		     : /* no output */
		     : "r" (task_thread_info(next)));
#endif

	/*
	 * If the task has used the FPU during the last 5 timeslices, do a
	 * full restore of the math state immediately to avoid the trap;
	 * the chances of needing the FPU soon are obviously high now.
	 */
	if (next->thread.fpu_counter > 5)
		__fpu_state_restore();

	return prev;
}
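
/*
 * Return the address a sleeping task is blocked at (used for wchan
 * reporting). If the saved PC is inside the scheduler, dig the caller
 * out of the switch frame on frame-pointer builds.
 */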
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
	if (in_sched_functions(pc)) {
		unsigned long schedule_frame = (unsigned long)p->thread.sp;
		return ((unsigned long *)schedule_frame)[21];
	}
#endif

	return pc;
}