/* MN10300 Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/reset-regs.h>
#include <asm/gdb-stub.h>
#include "internal.h"

/*
 * power management idle function, if any
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
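
/*
 * Illustrative sketch, not part of the original file: platform or power
 * management code can install its own low-power routine through the hook
 * above, and cpu_idle() below re-reads it on every iteration.  The name
 * my_board_idle is hypothetical.
 *
 *	static void my_board_idle(void)
 *	{
 *		local_irq_disable();
 *		if (!need_resched())
 *			safe_halt();	// or enter a deeper board sleep state
 *		else
 *			local_irq_enable();
 *	}
 *
 *	pm_idle = my_board_idle;
 */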

/*
 * return the saved PC of a blocked thread
 * - the saved PC is the fourth word of the frame at tsk->thread.sp, left
 *   on the kernel stack when the thread was switched out
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *) tsk->thread.sp)[3];
}

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
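
/*
 * Illustrative sketch, not part of the original file: board support code
 * would typically point this hook at whatever actually cuts the power,
 * for instance a write to a power-management chip.  The names here are
 * hypothetical.
 *
 *	static void my_board_power_off(void)
 *	{
 *		my_pmic_write(MY_PMIC_POWER_REG, MY_PMIC_POWER_OFF);
 *	}
 *
 *	pm_power_off = my_board_power_off;
 */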

#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 */
static void default_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!) to poll
 * the TIF_NEED_RESCHED flag instead of waiting for the cross-CPU
 * reschedule IPI to arrive.  Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */

/*
 * the idle thread
 * - there's no useful work to be done, so just try to conserve power and
 *   have a low exit latency (i.e. sit in a loop waiting for somebody to
 *   say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		while (!need_resched()) {
			void (*idle)(void);

			smp_rmb();
			idle = pm_idle;
			if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
				idle = poll_idle;
#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
				idle = default_idle;
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			}
			idle();
		}

		schedule_preempt_disabled();
	}
}

void release_segments(struct mm_struct *mm)
{
}

void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}

void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void show_regs(struct pt_regs *regs)
{
}

/*
 * free current thread data structures etc.
 */
void exit_thread(void)
{
	exit_fpu();
}

void flush_thread(void)
{
	flush_fpu();
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}

/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p, struct pt_regs *kregs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */

	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(!kregs)) {
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	*c_regs = *kregs;
	c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}
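
/*
 * Flow note: when kregs is NULL the request presumably came through the
 * generic kernel_thread() path, so c_usp carries the thread function and
 * ustk_size its argument, and the child starts life in
 * ret_from_kernel_thread.  For a user fork/clone the child instead resumes
 * in ret_from_fork with a copy of the parent's register frame on its own
 * kernel stack.
 */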

/*
 * clone a process
 * - tlsptr is retrieved by copy_thread() from current_frame()->d3
 */
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			  int __user *parent_tidptr, int __user *child_tidptr,
			  int __user *tlsptr)
{
	/* "newsp ?: x" is the GNU C conditional with the middle operand
	 * omitted: if the caller passes a NULL stack pointer, the child
	 * inherits the parent's current user stack pointer */
	return do_fork(clone_flags, newsp ?: current_frame()->sp,
		       current_frame(), 0, parent_tidptr, child_tidptr);
}

asmlinkage long sys_fork(void)
{
	return do_fork(SIGCHLD, current_frame()->sp,
		       current_frame(), 0, NULL, NULL);
}

asmlinkage long sys_vfork(void)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
		       current_frame(), 0, NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}