David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 1 | /* MN10300 Process handling code |
| 2 | * |
| 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
| 4 | * Written by David Howells (dhowells@redhat.com) |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public Licence |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the Licence, or (at your option) any later version. |
| 10 | */ |
| 11 | #include <linux/module.h> |
| 12 | #include <linux/errno.h> |
| 13 | #include <linux/sched.h> |
| 14 | #include <linux/kernel.h> |
| 15 | #include <linux/mm.h> |
| 16 | #include <linux/smp.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 17 | #include <linux/stddef.h> |
| 18 | #include <linux/unistd.h> |
| 19 | #include <linux/ptrace.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 20 | #include <linux/user.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/delay.h> |
| 23 | #include <linux/reboot.h> |
| 24 | #include <linux/percpu.h> |
| 25 | #include <linux/err.h> |
| 26 | #include <linux/fs.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 27 | #include <linux/slab.h> |
Frederic Weisbecker | 5b0753a | 2012-08-22 17:27:34 +0200 | [diff] [blame] | 28 | #include <linux/rcupdate.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 29 | #include <asm/uaccess.h> |
| 30 | #include <asm/pgtable.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 31 | #include <asm/io.h> |
| 32 | #include <asm/processor.h> |
| 33 | #include <asm/mmu_context.h> |
| 34 | #include <asm/fpu.h> |
| 35 | #include <asm/reset-regs.h> |
| 36 | #include <asm/gdb-stub.h> |
| 37 | #include "internal.h" |
| 38 | |
| 39 | /* |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 40 | * return saved PC of a blocked thread. |
| 41 | */ |
| 42 | unsigned long thread_saved_pc(struct task_struct *tsk) |
| 43 | { |
| 44 | return ((unsigned long *) tsk->thread.sp)[3]; |
| 45 | } |
| 46 | |
/*
 * power off function, if any
 *
 * Left NULL here; platform/board code may install a callback.  Exported
 * so modules (e.g. power-management drivers) can set or call it.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
| 52 | |
Akira Takeuchi | 368dd5a | 2010-10-27 17:28:55 +0100 | [diff] [blame] | 53 | /* |
| 54 | * On SMP it's slightly faster (but much more power-consuming!) |
| 55 | * to poll the ->work.need_resched flag instead of waiting for the |
| 56 | * cross-CPU IPI to arrive. Use this option with caution. |
Thomas Gleixner | af695cd | 2013-03-21 22:49:53 +0100 | [diff] [blame] | 57 | * |
| 58 | * tglx: No idea why this depends on HOTPLUG_CPU !?! |
Akira Takeuchi | 368dd5a | 2010-10-27 17:28:55 +0100 | [diff] [blame] | 59 | */ |
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * Default idle-loop body: enable interrupts and halt the CPU until the
 * next interrupt arrives (safe_halt()).  Compiled out on SMP-without-
 * hotplug configs — see the comment above about polling need_resched.
 */
void arch_cpu_idle(void)
{
	safe_halt();
}
#endif
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 66 | |
/*
 * Release any arch-specific segment state attached to an mm.
 * MN10300 keeps no such state, so this hook is a no-op.
 */
void release_segments(struct mm_struct *mm)
{
}
| 70 | |
/*
 * Restart the machine.
 * @cmd: optional reboot command string (ignored on this arch)
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* give an attached gdb stub the chance to detach before reset */
	gdbstub_exit(0);
#endif

	/* prefer a board-specific hard reset when the unit defines one */
#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
| 83 | |
/*
 * Halt the machine.  No CPU halt sequence is issued here; the only
 * action is letting an attached kernel debugger detach cleanly.
 */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
| 90 | |
/*
 * Power the machine off.
 *
 * NOTE(review): pm_power_off is declared above but never invoked here,
 * so a platform-installed power-off callback is ignored — verify this
 * is intentional for this arch.
 */
void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* let an attached gdb stub detach cleanly first */
	gdbstub_exit(0);
#endif
}
| 97 | |
/*
 * Dump register state for debugging.  Only the generic process/stack
 * banner is printed; the pt_regs contents are not decoded here and
 * @regs is unused on this arch.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
}
| 102 | |
| 103 | /* |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 104 | * free current thread data structures etc.. |
| 105 | */ |
/*
 * free current thread data structures etc..
 * The only arch-private per-thread state to drop is any lazily-held
 * FPU context, released via exit_fpu().
 */
void exit_thread(void)
{
	exit_fpu();
}
| 110 | |
/*
 * Reset arch-specific thread state across exec: discard the current
 * task's FPU context via flush_fpu().
 */
void flush_thread(void)
{
	flush_fpu();
}
| 115 | |
/*
 * Free arch-private resources of a dead task.  Nothing to release on
 * MN10300; this hook exists to satisfy the generic process code.
 */
void release_thread(struct task_struct *dead_task)
{
}
| 119 | |
| 120 | /* |
| 121 | * we do not have to muck with descriptors here, that is |
| 122 | * done in switch_mm() as needed. |
| 123 | */ |
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed — so copying segment state into a new
 * mm is a no-op on this arch.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}
| 127 | |
| 128 | /* |
Suresh Siddha | 55ccf3f | 2012-05-16 15:03:51 -0700 | [diff] [blame] | 129 | * this gets called so that we can store lazy state into memory and copy the |
| 130 | * current task into the new thread. |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 131 | */ |
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 *
 * unlazy_fpu() must run before the structure copy so any FPU state still
 * live in registers is flushed into @src first; @dst then inherits the
 * up-to-date copy.  Always succeeds (returns 0).
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}
| 138 | |
| 139 | /* |
| 140 | * set up the kernel stack for a new thread and copy arch-specific thread |
| 141 | * control information |
| 142 | */ |
/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 *
 * @clone_flags: clone() flags; only CLONE_SETTLS is examined here
 * @c_usp:       new userspace stack pointer — for kernel threads this
 *               instead carries the function to run
 * @ustk_size:   user stack size — for kernel threads this instead carries
 *               the argument to that function
 * @p:           the freshly allocated child task
 *
 * Returns 0 (cannot fail).
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	/* start from the top of the child's kernel stack */
	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12;		/* allocate function call ABI slack */

	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: no user frame to copy — pass the entry
		 * function in a0 and its argument in d0, and start at
		 * ret_from_kernel_thread with interrupts enabled */
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	/* user thread: start from a copy of the parent's register frame */
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}
| 188 | |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 189 | unsigned long get_wchan(struct task_struct *p) |
| 190 | { |
| 191 | return p->thread.wchan; |
| 192 | } |