David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 1 | /* MN10300 Process handling code |
| 2 | * |
| 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
| 4 | * Written by David Howells (dhowells@redhat.com) |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public Licence |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the Licence, or (at your option) any later version. |
| 10 | */ |
| 11 | #include <linux/module.h> |
| 12 | #include <linux/errno.h> |
| 13 | #include <linux/sched.h> |
| 14 | #include <linux/kernel.h> |
| 15 | #include <linux/mm.h> |
| 16 | #include <linux/smp.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 17 | #include <linux/stddef.h> |
| 18 | #include <linux/unistd.h> |
| 19 | #include <linux/ptrace.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 20 | #include <linux/user.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/delay.h> |
| 23 | #include <linux/reboot.h> |
| 24 | #include <linux/percpu.h> |
| 25 | #include <linux/err.h> |
| 26 | #include <linux/fs.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 27 | #include <linux/slab.h> |
Frederic Weisbecker | 5b0753a | 2012-08-22 17:27:34 +0200 | [diff] [blame] | 28 | #include <linux/rcupdate.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 29 | #include <asm/uaccess.h> |
| 30 | #include <asm/pgtable.h> |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 31 | #include <asm/io.h> |
| 32 | #include <asm/processor.h> |
| 33 | #include <asm/mmu_context.h> |
| 34 | #include <asm/fpu.h> |
| 35 | #include <asm/reset-regs.h> |
| 36 | #include <asm/gdb-stub.h> |
| 37 | #include "internal.h" |
| 38 | |
/*
 * power management idle function, if any..
 *
 * When non-NULL, cpu_idle() below calls this instead of the built-in
 * default_idle()/poll_idle() routine; exported so a PM driver module can
 * install its own idle handler.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
| 44 | |
| 45 | /* |
| 46 | * return saved PC of a blocked thread. |
| 47 | */ |
| 48 | unsigned long thread_saved_pc(struct task_struct *tsk) |
| 49 | { |
| 50 | return ((unsigned long *) tsk->thread.sp)[3]; |
| 51 | } |
| 52 | |
/*
 * power off function, if any
 *
 * Hook for a board/PM driver to register a routine that actually cuts
 * power; exported so modules can install it.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
| 58 | |
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 *
 * Interrupts are disabled before testing need_resched() so that the test
 * and the halt happen atomically with respect to a wakeup interrupt;
 * safe_halt() is expected to re-enable interrupts as it halts.
 */
static void default_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}
| 71 | |
#else  /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		/* advertise that this CPU polls the flag, then spin until a
		 * reschedule is requested */
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/* a reschedule was already pending; re-assert it so the
		 * caller's need_resched() check sees it */
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
| 100 | |
/*
 * the idle thread
 * - there's no useful work to be done, so just try to conserve power and have
 *   a low exit latency (ie sit in a loop waiting for somebody to say that
 *   they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		/* tell RCU this CPU is quiescent while it idles */
		rcu_idle_enter();
		while (!need_resched()) {
			void (*idle)(void);

			/* order the read of pm_idle against whatever stores
			 * the installer of the hook performed before it */
			smp_rmb();
			idle = pm_idle;
			if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
				idle = poll_idle;
#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
				idle = default_idle;
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			}
			idle();
		}
		rcu_idle_exit();

		schedule_preempt_disabled();
	}
}
| 131 | |
/* nothing to do: this architecture keeps no per-mm segment descriptors */
void release_segments(struct mm_struct *mm)
{
}
| 135 | |
/*
 * restart the machine: detach the kernel debugger (if configured), then
 * prefer a unit (board) level hard reset when the unit defines one,
 * falling back to a processor-level reset.  @cmd is ignored.
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
| 148 | |
/*
 * halt the machine: only the debugger detach is performed here; the CPU
 * is left to the caller (no halt instruction is issued in this stub)
 */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
| 155 | |
| 156 | void machine_power_off(void) |
| 157 | { |
David Howells | 044264b | 2011-03-18 16:54:31 +0000 | [diff] [blame] | 158 | #ifdef CONFIG_KERNEL_DEBUGGER |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 159 | gdbstub_exit(0); |
| 160 | #endif |
| 161 | } |
| 162 | |
/* NOTE(review): empty stub — register state is never dumped on this arch;
 * oops/backtrace output will lack a register dump.  TODO: print the
 * pt_regs contents */
void show_regs(struct pt_regs *regs)
{
}
| 166 | |
/*
 * free current thread data structures etc..
 * - only the FPU state needs releasing here
 */
void exit_thread(void)
{
	exit_fpu();
}
| 174 | |
/* reset per-thread state for exec(): discard any lazy FPU context */
void flush_thread(void)
{
	flush_fpu();
}
| 179 | |
/* nothing extra to free when a dead task's last reference goes away */
void release_thread(struct task_struct *dead_task)
{
}
| 183 | |
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}
| 191 | |
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* flush src's lazily-held FPU state out to its task_struct first so
	 * that the structure copy below captures current register contents */
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}
| 202 | |
/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 * - for a kernel thread, c_usp carries the function to call and ustk_size
 *   its argument (see the PF_KTHREAD branch below)
 * - for a user thread, c_usp is the new user stack pointer (0 = inherit)
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	/* start from the top of the child's kernel stack page(s) */
	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12;	/* allocate function call ABI slack */

	/* set up things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: build a zeroed frame, stash the function in
		 * a0 and its argument in d0, enable interrupts in the saved
		 * EPSW, and start execution in ret_from_kernel_thread */
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	/* user thread: clone the parent's current user register frame */
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}
| 252 | |
David Howells | b920de1 | 2008-02-08 04:19:31 -0800 | [diff] [blame] | 253 | unsigned long get_wchan(struct task_struct *p) |
| 254 | { |
| 255 | return p->thread.wchan; |
| 256 | } |