/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

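/*
 * Allow kernel code to use the FPU: give up any user FP state that is
 * live in the registers so the kernel can safely clobber them.  Callers
 * must not be preemptible.
 */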
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

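/*
 * Give up a task's VSX state.  The VSX registers overlap the FP and VMX
 * register sets, so FP and Altivec state is handed back first and then
 * the VSX half is released.
 */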
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

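/*
 * Breakpoint currently installed on each CPU, cached so that
 * __switch_to() can avoid reprogramming the hardware when the incoming
 * thread wants the same breakpoint as the outgoing one.
 */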
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is using the debug registers,
 * install the values stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_break(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

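/*
 * Pack an arch_hw_breakpoint into the legacy DABR/DABRX layout and hand
 * it to the platform hook if one is registered, otherwise program the
 * registers directly.
 */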
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

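/*
 * Program a Data Address Watchpoint Register (DAWR) breakpoint: the
 * read/write, translate and privilege bits of the generic breakpoint
 * type are shifted into their DAWRX positions.
 */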
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
			<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
			<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
			>> 3;		/* privilege bits */

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

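/*
 * Install a hardware breakpoint on this CPU: remember it in current_brk
 * and use the DAWR when the CPU has one, falling back to the DABR.
 */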
int set_break(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

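/*
 * Compare two hardware breakpoints; they match only when the address,
 * type and length are all identical.
 */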
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

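/*
 * Switch from 'prev' to 'new': save any lazily managed register state
 * (FP, Altivec, VSX, SPE), carry the hardware breakpoint and BookE
 * debug registers across, flush any pending lazy-MMU TLB batch, and
 * finally call the low-level _switch() to swap kernel stacks.
 */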
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule the DABR.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_break(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

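/*
 * Dump the instruction words around the faulting NIP, marking the
 * faulting instruction itself with angle brackets.
 */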
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

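/*
 * Print an angle-bracketed, comma-separated list of the names from the
 * table above whose bits are set in val.
 */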
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	*dst = *src;
	return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

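/*
 * Check whether sp points at least nbytes inside one of this CPU's
 * hard or soft IRQ stacks.
 */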
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

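/*
 * Check that sp points at least nbytes inside the task's kernel stack
 * or one of the IRQ stacks, so stack walkers can dereference it safely.
 */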
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

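/*
 * Walk a sleeping task's kernel stack to find the first return address
 * outside the scheduler, i.e. where the task is waiting.
 */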
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

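/*
 * Print a backtrace for a task, following saved link registers up the
 * stack and annotating exception frames.  When the function graph
 * tracer is active, return_to_handler trampolines are resolved back to
 * the original return addresses.
 */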
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

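/*
 * Random offset applied to the heap start: up to 8MB for 32-bit tasks
 * and up to 1GB for 64-bit tasks, always a whole number of pages.
 */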
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

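/*
 * Pick a randomized, page-aligned start address for the heap, never
 * below the current mm->brk.
 */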
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}