blob: d23d7526d37a1693c2070dad0b68f3a5c36119e1 [file] [log] [blame]
Paul Mackerras9994a332005-10-10 22:36:14 +10001/*
Paul Mackerras9994a332005-10-10 22:36:14 +10002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110035#include <asm/hw_irq.h>
Li Zhong5d1c5742013-05-13 16:16:43 +000036#include <asm/context_tracking.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100037
38/*
39 * System calls.
40 */
41	.section	".toc","aw"
/* TOC entry giving the address of the kernel system call table;
 * loaded below via SYS_CALL_TABLE@toc(2). */
Anton Blanchardc857c432014-02-04 16:05:53 +110042SYS_CALL_TABLE:
43	.tc sys_call_table[TC],sys_call_table
Paul Mackerras9994a332005-10-10 22:36:14 +100044
45/* This value is used to mark exception frames on the stack. */
46exception_marker:
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100047	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100048
49 .section ".text"
50 .align 7
51
52#undef SHOW_SYSCALLS
53
/*
 * System call entry point (review documentation; code preserved verbatim,
 * including the git-blame artefacts present in this capture of the file).
 *
 * On entry (set up by the exception vector, per the stores below):
 *   r0      = syscall number
 *   r3-r8   = syscall arguments
 *   r11     = userspace NIP   (saved to _NIP)
 *   r12     = userspace MSR   (saved to _MSR; MSR_PR tested for user mode)
 *   r9      = userspace r13   (saved to GPR13)
 *   r13     = PACA pointer (PACAKSAVE/PACATOC accessed through it)
 *
 * Builds an INT_FRAME_SIZE exception frame on the kernel stack, clears
 * CR0.SO (the syscall error flag), hard-enables interrupts, dispatches
 * through SYS_CALL_TABLE (32-bit entries offset by 8 for _TIF_32BIT
 * tasks), then returns to the caller via the fast path ending in RFI.
 */
54	.globl system_call_common
55system_call_common:
56	andi.	r10,r12,MSR_PR
57	mr	r10,r1
58	addi	r1,r1,-INT_FRAME_SIZE
59	beq-	1f
60	ld	r1,PACAKSAVE(r13)
611:	std	r10,0(r1)
62	std	r11,_NIP(r1)
63	std	r12,_MSR(r1)
64	std	r0,GPR0(r1)
65	std	r10,GPR1(r1)
Haren Myneni5d75b262012-12-06 21:46:37 +000066	beq	2f			/* if from kernel mode */
Paul Mackerrasc6622f62006-02-24 10:06:59 +110067	ACCOUNT_CPU_USER_ENTRY(r10, r11)
Haren Myneni5d75b262012-12-06 21:46:37 +0000682:	std	r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100069	std	r3,GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000070	mfcr	r2
Paul Mackerras9994a332005-10-10 22:36:14 +100071	std	r4,GPR4(r1)
72	std	r5,GPR5(r1)
73	std	r6,GPR6(r1)
74	std	r7,GPR7(r1)
75	std	r8,GPR8(r1)
76	li	r11,0
77	std	r11,GPR9(r1)
78	std	r11,GPR10(r1)
79	std	r11,GPR11(r1)
80	std	r11,GPR12(r1)
Anton Blanchard823df432012-04-04 18:24:29 +000081	std	r11,_XER(r1)
Anton Blanchard82087412012-04-04 18:26:39 +000082	std	r11,_CTR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100083	std	r9,GPR13(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100084	mflr	r10
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000085	/*
86	 * This clears CR0.SO (bit 28), which is the error indication on
87	 * return from this system call.
88	 */
89	rldimi	r2,r11,28,(63-28)
Paul Mackerras9994a332005-10-10 22:36:14 +100090	li	r11,0xc01
Paul Mackerras9994a332005-10-10 22:36:14 +100091	std	r10,_LINK(r1)
92	std	r11,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100093	std	r3,ORIG_GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000094	std	r2,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100095	ld	r2,PACATOC(r13)
96	addi	r9,r1,STACK_FRAME_OVERHEAD
97	ld	r11,exception_marker@toc(r2)
98	std	r11,-16(r9)		/* "regshere" marker */
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +020099#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000100BEGIN_FW_FTR_SECTION
101	beq	33f
102	/* if from user, see if there are any DTL entries to process */
103	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
104	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000105	addi	r10,r10,LPPACA_DTLIDX
106	LDX_BE	r10,0,r10		/* get log write index */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000107	cmpd	cr1,r11,r10
108	beq+	cr1,33f
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100109	bl	accumulate_stolen_time
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000110	REST_GPR(0,r1)
111	REST_4GPRS(3,r1)
112	REST_2GPRS(7,r1)
113	addi	r9,r1,STACK_FRAME_OVERHEAD
11433:
115END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +0200116#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000117
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100118	/*
119	 * A syscall should always be called with interrupts enabled
120	 * so we just unconditionally hard-enable here. When some kind
121	 * of irq tracing is used, we additionally check that condition
122	 * is correct
123	 */
124#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
125	lbz	r10,PACASOFTIRQEN(r13)
126	xori	r10,r10,1
1271:	tdnei	r10,0
128	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
129#endif
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000130
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000131#ifdef CONFIG_PPC_BOOK3E
132	wrteei	1
133#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100134	ld	r11,PACAKMSR(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000135	ori	r11,r11,MSR_EE
136	mtmsrd	r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000137#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000138
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100139	/* We do need to set SOFTE in the stack frame or the return
140	 * from interrupt will be painful
141	 */
142	li	r10,1
143	std	r10,SOFTE(r1)
144
Paul Mackerras9994a332005-10-10 22:36:14 +1000145#ifdef SHOW_SYSCALLS
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100146	bl	do_show_syscall
Paul Mackerras9994a332005-10-10 22:36:14 +1000147	REST_GPR(0,r1)
148	REST_4GPRS(3,r1)
149	REST_2GPRS(7,r1)
150	addi	r9,r1,STACK_FRAME_OVERHEAD
151#endif
Stuart Yoder9778b692012-07-05 04:41:35 +0000152	CURRENT_THREAD_INFO(r11, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000153	ld	r10,TI_FLAGS(r11)
Paul Mackerras9994a332005-10-10 22:36:14 +1000154	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
Anton Blanchard25403342013-01-09 10:47:36 +1100155	bne	syscall_dotrace
Anton Blanchardd14299d2012-04-04 18:23:27 +0000156.Lsyscall_dotrace_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000157	cmpldi	0,r0,NR_syscalls
158	bge-	syscall_enosys
159
160system_call:			/* label this so stack traces look sane */
161/*
162 * Need to vector to 32 Bit or default sys_call_table here,
163 * based on caller's run-mode / personality.
164 */
Anton Blanchardc857c432014-02-04 16:05:53 +1100165	ld	r11,SYS_CALL_TABLE@toc(2)
Paul Mackerras9994a332005-10-10 22:36:14 +1000166	andi.	r10,r10,_TIF_32BIT
167	beq	15f
168	addi	r11,r11,8	/* use 32-bit syscall entries */
169	clrldi	r3,r3,32
170	clrldi	r4,r4,32
171	clrldi	r5,r5,32
172	clrldi	r6,r6,32
173	clrldi	r7,r7,32
174	clrldi	r8,r8,32
17515:
176	slwi	r0,r0,4
Anton Blanchardcc7efbf2014-02-04 16:07:47 +1100177	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
178	mtctr   r12
Paul Mackerras9994a332005-10-10 22:36:14 +1000179	bctrl			/* Call handler */
180
/* syscall_exit: r3 holds the handler's return value.  Fast path back to
 * the caller: re-disable interrupts (and RI on Book3S, batched into one
 * mtmsrd), test for slow-path work, translate errors, and RFI out. */
181syscall_exit:
Paul Mackerras9994a332005-10-10 22:36:14 +1000182	std	r3,RESULT(r1)
David Woodhouse401d1f02005-11-15 18:52:18 +0000183#ifdef SHOW_SYSCALLS
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100184	bl	do_show_syscall_exit
David Woodhouse401d1f02005-11-15 18:52:18 +0000185	ld	r3,RESULT(r1)
186#endif
Stuart Yoder9778b692012-07-05 04:41:35 +0000187	CURRENT_THREAD_INFO(r12, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000188
Paul Mackerras9994a332005-10-10 22:36:14 +1000189	ld	r8,_MSR(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000190#ifdef CONFIG_PPC_BOOK3S
191	/* No MSR:RI on BookE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000192	andi.	r10,r8,MSR_RI
193	beq-	unrecov_restore
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000194#endif
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100195	/*
196	 * Disable interrupts so current_thread_info()->flags can't change,
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000197	 * and so that we don't get interrupted after loading SRR0/1.
198	 */
199#ifdef CONFIG_PPC_BOOK3E
200	wrteei	0
201#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100202	ld	r10,PACAKMSR(r13)
Anton Blanchardac1dc362012-05-29 12:22:00 +0000203	/*
204	 * For performance reasons we clear RI the same time that we
205	 * clear EE. We only need to clear RI just before we restore r13
206	 * below, but batching it with EE saves us one expensive mtmsrd call.
207	 * We have to be careful to restore RI if we branch anywhere from
208	 * here (eg syscall_exit_work).
209	 */
210	li	r9,MSR_RI
211	andc	r11,r10,r9
212	mtmsrd	r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000213#endif /* CONFIG_PPC_BOOK3E */
214
Paul Mackerras9994a332005-10-10 22:36:14 +1000215	ld	r9,TI_FLAGS(r12)
David Woodhouse401d1f02005-11-15 18:52:18 +0000216	li	r11,-_LAST_ERRNO
Paul Mackerras1bd79332006-03-08 13:24:22 +1100217	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000218	bne-	syscall_exit_work
David Woodhouse401d1f02005-11-15 18:52:18 +0000219	cmpld	r3,r11
220	ld	r5,_CCR(r1)
221	bge-	syscall_error
Anton Blanchardd14299d2012-04-04 18:23:27 +0000222.Lsyscall_error_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000223	ld	r7,_NIP(r1)
Anton Blanchardf89451f2010-08-11 01:40:27 +0000224BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000225	stdcx.	r0,0,r1			/* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000226END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerras9994a332005-10-10 22:36:14 +1000227	andi.	r6,r8,MSR_PR
228	ld	r4,_LINK(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000229
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100230	beq-	1f
231	ACCOUNT_CPU_USER_EXIT(r11, r12)
Haren Myneni44e93092012-12-06 21:51:04 +0000232	HMT_MEDIUM_LOW_HAS_PPR
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100233	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
Paul Mackerras9994a332005-10-10 22:36:14 +10002341:	ld	r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000235	ld	r1,GPR1(r1)
236	mtlr	r4
237	mtcr	r5
238	mtspr	SPRN_SRR0,r7
239	mtspr	SPRN_SRR1,r8
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000240	RFI
Paul Mackerras9994a332005-10-10 22:36:14 +1000241	b	.	/* prevent speculative execution */
242
/* Error return: r3 currently holds -errno (caught by the unsigned compare
 * against -_LAST_ERRNO).  Negate it to a positive errno for userspace and
 * set CR0.SO in the saved CR image — the syscall-failed flag. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000243syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000244	oris	r5,r5,0x1000	/* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000245	neg	r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000246	std	r5,_CCR(r1)
Anton Blanchardd14299d2012-04-04 18:23:27 +0000247	b	.Lsyscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000248
Paul Mackerras9994a332005-10-10 22:36:14 +1000249/* Traced system call support */
/* Slow path taken when _TIF_SYSCALL_T_OR_A is set: save the full register
 * set, let the tracer inspect/modify the call, then reload the (possibly
 * changed) arguments and rejoin the common dispatch path. */
250syscall_dotrace:
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100251	bl	save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000252	addi	r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100253	bl	do_syscall_trace_enter
Roland McGrath4f72c422008-07-27 16:51:03 +1000254	/*
255	 * Restore argument registers possibly just changed.
256	 * We use the return value of do_syscall_trace_enter
257	 * for the call number to look up in the table (r0).
258	 */
259	mr	r0,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000260	ld	r3,GPR3(r1)
261	ld	r4,GPR4(r1)
262	ld	r5,GPR5(r1)
263	ld	r6,GPR6(r1)
264	ld	r7,GPR7(r1)
265	ld	r8,GPR8(r1)
266	addi	r9,r1,STACK_FRAME_OVERHEAD
Stuart Yoder9778b692012-07-05 04:41:35 +0000267	CURRENT_THREAD_INFO(r10, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000268	ld	r10,TI_FLAGS(r10)
/* r10 must hold TI_FLAGS again: the common path re-tests _TIF_32BIT. */
Anton Blanchardd14299d2012-04-04 18:23:27 +0000269	b	.Lsyscall_dotrace_cont
Paul Mackerras9994a332005-10-10 22:36:14 +1000270
/* Syscall number was >= NR_syscalls: fail with -ENOSYS via the normal exit. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000271syscall_enosys:
272	li	r3,-ENOSYS
273	b	syscall_exit
274
/*
 * Slow-path syscall exit.  On entry (set up by syscall_exit above):
 *   r9  = TI_FLAGS, r11 = -_LAST_ERRNO, r12 = thread_info pointer,
 *   r10 = kernel MSR (Book3S: used below to restore RI, which the fast
 *         path had cleared together with EE).
 * Handles TIF_RESTOREALL / TIF_NOERROR, error negation + CR0.SO,
 * atomically clears per-syscall TIF flags, and runs syscall-exit tracing
 * before falling back to the generic exception return.
 */
275syscall_exit_work:
Anton Blanchardac1dc362012-05-29 12:22:00 +0000276#ifdef CONFIG_PPC_BOOK3S
277	mtmsrd	r10,1		/* Restore RI */
278#endif
David Woodhouse401d1f02005-11-15 18:52:18 +0000279	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
280	 If TIF_NOERROR is set, just save r3 as it is. */
281
282	andi.	r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100283	beq+	0f
284	REST_NVGPRS(r1)
285	b	2f
2860:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
David Woodhouse401d1f02005-11-15 18:52:18 +0000287	blt+	1f
288	andi.	r0,r9,_TIF_NOERROR
289	bne-	1f
290	ld	r5,_CCR(r1)
291	neg	r3,r3
292	oris	r5,r5,0x1000	/* Set SO bit in CR */
293	std	r5,_CCR(r1)
2941:	std	r3,GPR3(r1)
2952:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
296	beq	4f
297
Paul Mackerras1bd79332006-03-08 13:24:22 +1100298	/* Clear per-syscall TIF flags if any are set.  */
David Woodhouse401d1f02005-11-15 18:52:18 +0000299
300	li	r11,_TIF_PERSYSCALL_MASK
301	addi	r12,r12,TI_FLAGS
/* ldarx/stdcx. retry loop: lock-free clear of the per-syscall flag bits. */
3023:	ldarx	r10,0,r12
303	andc	r10,r10,r11
304	stdcx.	r10,0,r12
305	bne-	3b
306	subi	r12,r12,TI_FLAGS
Paul Mackerras1bd79332006-03-08 13:24:22 +1100307
3084:	/* Anything else left to do? */
Alistair Popple05e38e52013-04-15 11:44:14 +1000309	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
Paul Mackerras1bd79332006-03-08 13:24:22 +1100310	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100311	beq	ret_from_except_lite
David Woodhouse401d1f02005-11-15 18:52:18 +0000312
313	/* Re-enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000314#ifdef CONFIG_PPC_BOOK3E
315	wrteei	1
316#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100317	ld	r10,PACAKMSR(r13)
David Woodhouse401d1f02005-11-15 18:52:18 +0000318	ori	r10,r10,MSR_EE
319	mtmsrd	r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000320#endif /* CONFIG_PPC_BOOK3E */
David Woodhouse401d1f02005-11-15 18:52:18 +0000321
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100322	bl	save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000323	addi	r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100324	bl	do_syscall_trace_leave
325	b	ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000326
327/* Save non-volatile GPRs, if not already saved. */
328_GLOBAL(save_nvgprs)
329 ld r11,_TRAP(r1)
330 andi. r0,r11,1
331 beqlr-
332 SAVE_NVGPRS(r1)
333 clrrdi r0,r11,1
334 std r0,_TRAP(r1)
335 blr
336
David Woodhouse401d1f02005-11-15 18:52:18 +0000337
Paul Mackerras9994a332005-10-10 22:36:14 +1000338/*
339 * The sigsuspend and rt_sigsuspend system calls can call do_signal
340 * and thus put the process into the stopped state where we might
341 * want to examine its user state with ptrace. Therefore we need
342 * to save all the nonvolatile registers (r14 - r31) before calling
343 * the C code. Similarly, fork, vfork and clone need the full
344 * register state on the stack so that it can be copied to the child.
345 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000346
/* fork wrapper: save the non-volatile GPRs first so the child can be
 * given a complete register image (see the comment block above). */
347_GLOBAL(ppc_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100348	bl	save_nvgprs
349	bl	sys_fork
Paul Mackerras9994a332005-10-10 22:36:14 +1000350	b	syscall_exit
351
/* vfork wrapper: full register state saved for the child, as for fork. */
352_GLOBAL(ppc_vfork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100353	bl	save_nvgprs
354	bl	sys_vfork
Paul Mackerras9994a332005-10-10 22:36:14 +1000355	b	syscall_exit
356
/* clone wrapper: full register state saved for the child, as for fork. */
357_GLOBAL(ppc_clone)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100358	bl	save_nvgprs
359	bl	sys_clone
Paul Mackerras9994a332005-10-10 22:36:14 +1000360	b	syscall_exit
361
/* 32-bit swapcontext wrapper: nvgprs must be in the frame so the full
 * user register state can be copied to/from the ucontext. */
Paul Mackerras1bd79332006-03-08 13:24:22 +1100362_GLOBAL(ppc32_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100363	bl	save_nvgprs
364	bl	compat_sys_swapcontext
Paul Mackerras1bd79332006-03-08 13:24:22 +1100365	b	syscall_exit
366
/* 64-bit swapcontext wrapper: as above, full register state required. */
367_GLOBAL(ppc64_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100368	bl	save_nvgprs
369	bl	sys_swapcontext
Paul Mackerras1bd79332006-03-08 13:24:22 +1100370	b	syscall_exit
371
/* First return of a newly forked user task: finish the scheduler
 * bookkeeping, restore the child's nvgprs, and return 0 (the child's
 * fork() result) through the normal syscall exit path. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000372_GLOBAL(ret_from_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100373	bl	schedule_tail
Paul Mackerras9994a332005-10-10 22:36:14 +1000374	REST_NVGPRS(r1)
375	li	r3,0
376	b	syscall_exit
377
/* First run of a kernel thread.  r14/r15 were staged by the thread-copy
 * code elsewhere; r14 appears to be a function descriptor that is
 * dereferenced here for the entry address (ELFv1-style -- TODO confirm)
 * and r15 is the thread function's argument.  If the function returns,
 * exit through syscall_exit with a 0 result. */
Al Viro58254e12012-09-12 18:32:42 -0400378_GLOBAL(ret_from_kernel_thread)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100379	bl	schedule_tail
Al Viro58254e12012-09-12 18:32:42 -0400380	REST_NVGPRS(r1)
Al Viro53b50f92012-10-21 16:50:34 -0400381	ld	r14, 0(r14)
Al Viro58254e12012-09-12 18:32:42 -0400382	mtlr	r14
383	mr	r3,r15
384	blrl
385	li	r3,0
Al Virobe6abfa72012-08-31 15:48:05 -0400386	b	syscall_exit
387
Anton Blanchard71433282012-09-03 16:51:10 +0000388	.section	".toc","aw"
/* TOC entry holding the address of the global dscr_default variable,
 * read in _switch below when the new thread does not inherit its DSCR. */
389DSCR_DEFAULT:
390	.tc dscr_default[TC],dscr_default
391
392	.section	".text"
393
Paul Mackerras9994a332005-10-10 22:36:14 +1000394/*
395 * This routine switches between two different tasks. The process
396 * state of one is saved on its kernel stack. Then the state
397 * of the other is restored from its kernel stack. The memory
398 * management hardware is updated to the second process's state.
399 * Finally, we can return to the second process, via ret_from_except.
400 * On entry, r3 points to the THREAD for the current task, r4
401 * points to the THREAD for the new task.
402 *
403 * Note: there are two ways to get to the "going out" portion
404 * of this code; either by coming in via the entry (_switch)
405 * or via "fork" which must set up an environment equivalent
406 * to the "_switch" path. If you change this you'll have to change
407 * the fork code also.
408 *
409 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600410 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000411 */
/* _switch(prev THREAD in r3, next THREAD in r4): saves the outgoing
 * task's state, swaps stacks/current/SLB, restores the incoming task's
 * state, and returns the previous task_struct in r3.  (Review doc added;
 * code preserved verbatim, including git-blame artefacts in this capture.) */
412	.align	7
413_GLOBAL(_switch)
414	mflr	r0
415	std	r0,16(r1)
416	stdu	r1,-SWITCH_FRAME_SIZE(r1)
417	/* r3-r13 are caller saved -- Cort */
418	SAVE_8GPRS(14, r1)
419	SAVE_10GPRS(22, r1)
420	mflr	r20		/* Return to switch caller */
421	mfmsr	r22
422	li	r0, MSR_FP
Michael Neulingce48b212008-06-25 14:07:18 +1000423#ifdef CONFIG_VSX
424BEGIN_FTR_SECTION
425	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
426END_FTR_SECTION_IFSET(CPU_FTR_VSX)
427#endif /* CONFIG_VSX */
Paul Mackerras9994a332005-10-10 22:36:14 +1000428#ifdef CONFIG_ALTIVEC
429BEGIN_FTR_SECTION
430	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
431	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
432	std	r24,THREAD_VRSAVE(r3)
433END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
434#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000435#ifdef CONFIG_PPC64
436BEGIN_FTR_SECTION
437	mfspr	r25,SPRN_DSCR
438	std	r25,THREAD_DSCR(r3)
439END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
440#endif
/* If FP/VEC/VSX were live in the MSR, turn them off now so the new task
 * will fault-in its own vector/FP state on first use. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000441	and.	r0,r0,r22
442	beq+	1f
443	andc	r22,r22,r0
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000444	MTMSRD(r22)
Paul Mackerras9994a332005-10-10 22:36:14 +1000445	isync
4461:	std	r20,_NIP(r1)
447	mfcr	r23
448	std	r23,_CCR(r1)
449	std	r1,KSP(r3)	/* Set old stack pointer */
450
Ian Munsie2468dcf2013-02-07 15:46:58 +0000451#ifdef CONFIG_PPC_BOOK3S_64
452BEGIN_FTR_SECTION
Michael Ellerman93533742013-04-30 20:17:04 +0000453	/* Event based branch registers */
454	mfspr	r0, SPRN_BESCR
455	std	r0, THREAD_BESCR(r3)
456	mfspr	r0, SPRN_EBBHR
457	std	r0, THREAD_EBBHR(r3)
458	mfspr	r0, SPRN_EBBRR
459	std	r0, THREAD_EBBRR(r3)
Michael Ellerman1de2bd42013-04-30 20:17:02 +0000460END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Ian Munsie2468dcf2013-02-07 15:46:58 +0000461#endif
462
Paul Mackerras9994a332005-10-10 22:36:14 +1000463#ifdef CONFIG_SMP
464	/* We need a sync somewhere here to make sure that if the
465	 * previous task gets rescheduled on another CPU, it sees all
466	 * stores it has performed on this one.
467	 */
468	sync
469#endif /* CONFIG_SMP */
470
Anton Blanchardf89451f2010-08-11 01:40:27 +0000471	/*
472	 * If we optimise away the clear of the reservation in system
473	 * calls because we know the CPU tracks the address of the
474	 * reservation, then we need to clear it here to cover the
475	 * case that the kernel context switch path has no larx
476	 * instructions.
477	 */
478BEGIN_FTR_SECTION
479	ldarx	r6,0,r1
480END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
481
Michael Neulinga5153482013-05-29 19:34:27 +0000482#ifdef CONFIG_PPC_BOOK3S
483/* Cancel all explict user streams as they will have no use after context
484 * switch and will stop the HW from creating streams itself
485 */
486	DCBT_STOP_ALL_STREAM_IDS(r6)
487#endif
488
Paul Mackerras9994a332005-10-10 22:36:14 +1000489	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
490	std	r6,PACACURRENT(r13)	/* Set new 'current' */
491
492	ld	r8,KSP(r4)	/* new stack pointer */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000493#ifdef CONFIG_PPC_BOOK3S
/* Book3S hash-MMU: make sure the new kernel stack's segment has a bolted
 * SLB entry (256M or 1T segment size, per MMU feature), invalidating the
 * old translation first when the ESID actually changes. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000494BEGIN_FTR_SECTION
Michael Ellermanc2303282008-06-24 11:33:05 +1000495	BEGIN_FTR_SECTION_NESTED(95)
Paul Mackerras9994a332005-10-10 22:36:14 +1000496	clrrdi	r6,r8,28	/* get its ESID */
497	clrrdi	r9,r1,28	/* get current sp ESID */
Michael Ellermanc2303282008-06-24 11:33:05 +1000498	FTR_SECTION_ELSE_NESTED(95)
Paul Mackerras1189be62007-10-11 20:37:10 +1000499	clrrdi	r6,r8,40	/* get its 1T ESID */
500	clrrdi	r9,r1,40	/* get current sp 1T ESID */
Matt Evans44ae3ab2011-04-06 19:48:50 +0000501	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
Michael Ellermanc2303282008-06-24 11:33:05 +1000502FTR_SECTION_ELSE
503	b	2f
Matt Evans44ae3ab2011-04-06 19:48:50 +0000504ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
Paul Mackerras9994a332005-10-10 22:36:14 +1000505	clrldi.	r0,r6,2		/* is new ESID c00000000? */
506	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
507	cror	eq,4*cr1+eq,eq
508	beq	2f		/* if yes, don't slbie it */
509
510	/* Bolt in the new stack SLB entry */
511	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
512	oris	r0,r6,(SLB_ESID_V)@h
513	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
Paul Mackerras1189be62007-10-11 20:37:10 +1000514BEGIN_FTR_SECTION
515	li	r9,MMU_SEGSIZE_1T	/* insert B field */
516	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
517	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
Matt Evans44ae3ab2011-04-06 19:48:50 +0000518END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
Michael Neuling2f6093c2006-08-07 16:19:19 +1000519
Michael Neuling00efee72007-08-24 16:58:37 +1000520	/* Update the last bolted SLB.  No write barriers are needed
521	 * here, provided we only update the current CPU's SLB shadow
522	 * buffer.
523	 */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000524	ld	r9,PACA_SLBSHADOWPTR(r13)
Michael Neuling11a27ad2006-08-09 17:00:30 +1000525	li	r12,0
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000526	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
527	li	r12,SLBSHADOW_STACKVSID
528	STDX_BE	r7,r12,r9			/* Save VSID */
529	li	r12,SLBSHADOW_STACKESID
530	STDX_BE	r0,r12,r9			/* Save ESID */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000531
Matt Evans44ae3ab2011-04-06 19:48:50 +0000532	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
Olof Johanssonf66bce52007-10-16 00:58:59 +1000533	 * we have 1TB segments, the only CPUs known to have the errata
534	 * only support less than 1TB of system memory and we'll never
535	 * actually hit this code path.
536	 */
537
Paul Mackerras9994a332005-10-10 22:36:14 +1000538	slbie	r6
539	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
540	slbmte	r7,r0
541	isync
Paul Mackerras9994a332005-10-10 22:36:14 +10005422:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000543#endif /* !CONFIG_PPC_BOOK3S */
544
Stuart Yoder9778b692012-07-05 04:41:35 +0000545	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
Paul Mackerras9994a332005-10-10 22:36:14 +1000546	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
547	   because we don't need to leave the 288-byte ABI gap at the
548	   top of the kernel stack.  */
549	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
550
551	mr	r1,r8		/* start using new stack pointer */
552	std	r7,PACAKSAVE(r13)
553
Ian Munsie2468dcf2013-02-07 15:46:58 +0000554#ifdef CONFIG_PPC_BOOK3S_64
555BEGIN_FTR_SECTION
Michael Ellerman93533742013-04-30 20:17:04 +0000556	/* Event based branch registers */
557	ld	r0, THREAD_BESCR(r4)
558	mtspr	SPRN_BESCR, r0
559	ld	r0, THREAD_EBBHR(r4)
560	mtspr	SPRN_EBBHR, r0
561	ld	r0, THREAD_EBBRR(r4)
562	mtspr	SPRN_EBBRR, r0
563
Ian Munsie2468dcf2013-02-07 15:46:58 +0000564	ld	r0,THREAD_TAR(r4)
565	mtspr	SPRN_TAR,r0
Michael Ellerman1de2bd42013-04-30 20:17:02 +0000566END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Ian Munsie2468dcf2013-02-07 15:46:58 +0000567#endif
568
Paul Mackerras9994a332005-10-10 22:36:14 +1000569#ifdef CONFIG_ALTIVEC
570BEGIN_FTR_SECTION
571	ld	r0,THREAD_VRSAVE(r4)
572	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
573END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
574#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000575#ifdef CONFIG_PPC64
576BEGIN_FTR_SECTION
/* DSCR: use the thread's own value if it inherited one, else the global
 * dscr_default; only write the SPR when the value actually changes
 * (r25 still holds the outgoing task's DSCR from the save above). */
Anton Blanchard71433282012-09-03 16:51:10 +0000577	lwz	r6,THREAD_DSCR_INHERIT(r4)
578	ld	r7,DSCR_DEFAULT@toc(2)
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000579	ld	r0,THREAD_DSCR(r4)
Anton Blanchard71433282012-09-03 16:51:10 +0000580	cmpwi	r6,0
581	bne	1f
582	ld	r0,0(r7)
Michael Neuling25176172013-08-09 17:29:29 +10005831:
Michael Neulingbc683a72013-08-26 13:55:57 +1000584BEGIN_FTR_SECTION_NESTED(70)
585	mfspr	r8, SPRN_FSCR
586	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
587	mtspr	SPRN_FSCR, r8
588END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
589	cmpd	r0,r25
Anton Blanchard71433282012-09-03 16:51:10 +0000590	beq	2f
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000591	mtspr	SPRN_DSCR,r0
Anton Blanchard71433282012-09-03 16:51:10 +00005922:
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000593END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
594#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000595
Anton Blanchard71433282012-09-03 16:51:10 +0000596	ld	r6,_CCR(r1)
597	mtcrf	0xFF,r6
598
Paul Mackerras9994a332005-10-10 22:36:14 +1000599	/* r3-r13 are destroyed -- Cort */
600	REST_8GPRS(14, r1)
601	REST_10GPRS(22, r1)
602
603	/* convert old thread to its task_struct for return value */
604	addi	r3,r3,-THREAD
605	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
606	mtlr	r7
607	addi	r1,r1,SWITCH_FRAME_SIZE
608	blr
609
610	.align	7
/* Full exception return: restore the non-volatile GPRs first when they
 * were saved (save_nvgprs clears bit 0 of _TRAP after saving, so bit 0
 * still set means they were never saved and the restore is skipped),
 * then fall through into ret_from_except_lite. */
611_GLOBAL(ret_from_except)
612	ld	r11,_TRAP(r1)
613	andi.	r0,r11,1
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100614	bne	ret_from_except_lite
Paul Mackerras9994a332005-10-10 22:36:14 +1000615	REST_NVGPRS(r1)
616
617_GLOBAL(ret_from_except_lite)
618 /*
619 * Disable interrupts so that current_thread_info()->flags
620 * can't change between when we test it and when we return
621 * from the interrupt.
622 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000623#ifdef CONFIG_PPC_BOOK3E
624 wrteei 0
625#else
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100626 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
627 mtmsrd r10,1 /* Update machine state */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000628#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000629
Stuart Yoder9778b692012-07-05 04:41:35 +0000630 CURRENT_THREAD_INFO(r9, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000631 ld r3,_MSR(r1)
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530632#ifdef CONFIG_PPC_BOOK3E
633 ld r10,PACACURRENT(r13)
634#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000635 ld r4,TI_FLAGS(r9)
Paul Mackerras9994a332005-10-10 22:36:14 +1000636 andi. r3,r3,MSR_PR
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000637 beq resume_kernel
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530638#ifdef CONFIG_PPC_BOOK3E
639 lwz r3,(THREAD+THREAD_DBCR0)(r10)
640#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000641
642 /* Check current_thread_info()->flags */
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000643 andi. r0,r4,_TIF_USER_WORK_MASK
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530644#ifdef CONFIG_PPC_BOOK3E
645 bne 1f
646 /*
647 * Check to see if the dbcr0 register is set up to debug.
648 * Use the internal debug mode bit to do this.
649 */
650 andis. r0,r3,DBCR0_IDM@h
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000651 beq restore
Bharat Bhushan13d543c2013-05-22 09:50:59 +0530652 mfmsr r0
653 rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
654 mtmsr r0
655 mtspr SPRN_DBCR0,r3
656 li r10, -1
657 mtspr SPRN_DBSR,r10
658 b restore
659#else
660 beq restore
661#endif
6621: andi. r0,r4,_TIF_NEED_RESCHED
663 beq 2f
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100664 bl restore_interrupts
Li Zhong5d1c5742013-05-13 16:16:43 +0000665 SCHEDULE_USER
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100666 b ret_from_except_lite
Paul Mackerrasd31626f2014-01-13 15:56:29 +11006672:
668#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
669 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
670 bne 3f /* only restore TM if nothing else to do */
671 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100672 bl restore_tm_state
Paul Mackerrasd31626f2014-01-13 15:56:29 +1100673 b restore
6743:
675#endif
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100676 bl save_nvgprs
677 bl restore_interrupts
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000678 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100679 bl do_notify_resume
680 b ret_from_except
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000681
resume_kernel:
	/*
	 * Return-to-kernel path.  On entry r1 points at the exception
	 * frame.  The TI_FLAGS/TI_PREEMPT accesses below imply r9 holds
	 * the current thread_info pointer and r4 its flags word --
	 * NOTE(review): set up by the caller before branching here;
	 * confirm against the code preceding this chunk.
	 */
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f			/* no deferred stack store pending */

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	/*
	 * A stwu (stack update) was emulated and its store deferred.
	 * Complete it: copy the exception frame down to where the new
	 * stack pointer will live, then perform the real store.
	 */
	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE (in doublewords) */
	li	r6,0			/* start offset: 0 */
	mtctr	r5			/* CTR = doublewords left to copy */
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag (atomically, ldarx/stdcx.) */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b			/* reservation lost: retry */
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0		/* cr1.eq = (preempt_count == 0) */
	ld	r0,SOFTE(r1)
	cmpdi	r0,0			/* cr0.eq = (soft-enable state == 0) */
	crandc	eq,cr1*4+eq,eq		/* eq = count==0 && soft-enabled */
	bne	restore			/* not preemptible here */

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0			/* Book3E: clear MSR[EE] directly */
#else
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000754
	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts.
	 *
	 * r5 = soft-enable state saved in the frame (SOFTE),
	 * r6 = current paca->soft_enabled.
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off		/* returning with irqs soft-disabled */

	/* We are enabling, were we already enabled ? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);	/* mark soft-enabled */

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 *
	 * From here on r3 holds the MSR we are returning to (kept live
	 * all the way to the mtspr SPRN_SRR1 below).
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore		/* RI clear: state is unrecoverable */

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)	/* r2 stays live until the PPR restore */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13)	/* Get kernel MSR without EE */
	andc	r4,r4,r0		/* r0 contains MSR_RI here */
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13)	/* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace - the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR		/* MSR_PR set => returning to user */
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2		/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3		/* target MSR */

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2		/* target PC */

	/* Restore the last few GPRs; r1 must be restored last */
	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */
878
	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE		/* will the rfi hard-enable ? */
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS	/* yes: drop the hard-dis bit */
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);	/* mark interrupts soft-disabled */
	TRACE_DISABLE_INTS
	b	do_restore
897
	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 *
	 * __check_irq_replay returns in r3 the vector of the interrupt
	 * to replay, or 0 if none is pending (inferred from the
	 * dispatch below -- 0x500/0x900/doorbell vectors).
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay	/* nothing to replay */

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60		/* keep only the low nibble */
	or	r4,r4,r3		/* merge in the replayed vector */
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500		/* external interrupt -> do_IRQ */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900		/* decrementer -> timer_interrupt */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280		/* Book3E doorbell vector */
#else
	BEGIN_FTR_SECTION
	cmpwi	cr0,r3,0xe80		/* hypervisor doorbell */
	FTR_SECTION_ELSE
	cmpwi	cr0,r3,0xa00		/* processor doorbell */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100955
	/*
	 * Reached from fast_exception_return when MSR[RI] is clear in
	 * the frame being restored: the saved state cannot be trusted.
	 * Report it and spin; unrecoverable_exception() is not expected
	 * to return, the loop is a backstop.
	 */
unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore
960
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 *
 * Saves all non-volatile state (GPRs, CR, CTR, XER, DAR, DSISR) in a
 * stack frame, stashes SP and MSR in the PACA, then rfid's into the
 * RTAS entry point with MMU, FP and little-endian mode stripped from
 * the MSR.  Control comes back at rtas_return_loc below.
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0			/* trap if soft-enabled */
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts: rotate MSR[EE] into the top bit,
	 * mask it off, rotate back, and write the result. */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	/* r0 = current MSR minus EE/SE/BE/RI: interrupts+tracing off */
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	/* r6 = r0 additionally minus SF/IR/DR/FP/FE0/FE1/RI/LE:
	 * the 32-bit big-endian real-mode MSR RTAS will run with */
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */
1047
/*
 * RTAS returns here, still in real mode and possibly wrong-endian.
 * Recover the PACA and our saved SP/MSR, then rfid to
 * rtas_restore_regs with relocation back on.
 */
rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	/* PC-relative load of &rtas_restore_regs (works with MMU off) */
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0		/* clear RI while SRR0/1 are live */
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs	/* pointer used by the load above */
Paul Mackerrase31aa452008-08-30 11:41:12 +10001075
/*
 * Final leg of the RTAS call: undo everything enter_rtas saved and
 * return to its caller.
 */
rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */
1101
1102#endif /* CONFIG_PPC_RTAS */
1103
/*
 * Call into Open Firmware (PROM).
 *
 * r4 holds the PROM entry point (it is moved straight into SRR0
 * below); r3 is left untouched so it reaches PROM as the argument.
 * Like enter_rtas, all state PROM may clobber is saved on the stack,
 * and PROM is entered in 32-bit big-endian mode via rfi(d).
 */
_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31		/* clear the top (64-bit mode) bit */
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12		/* drop 64-bit and little-endian bits */
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupt by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001169
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With dynamic ftrace the compiler-inserted _mcount call sites are
 * patched at runtime, so the default _mcount is a plain return.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

/*
 * Patched-in tracer entry.  Saves LR (the traced function's address),
 * builds a small frame, loads the parent's return address into r4 and
 * the traced function's address (minus the mcount call) into r3, then
 * falls through the patchable ftrace_call/ftrace_graph_call sites.
 */
_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)		/* caller's frame */
	stdu	r1, -112(r1)
	std	r3, 128(r1)		/* save traced-function address */
	ld	r4, 16(r11)		/* parent's saved LR */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub		/* patched to the active tracer */
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub	/* patched to ftrace_graph_caller */
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
/*
 * Static ftrace: every _mcount call goes through here and dispatches
 * indirectly via the ftrace_trace_function pointer.
 */
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)		/* caller's frame */
	stdu	r1, -112(r1)
	std	r3, 128(r1)		/* save traced-function address */
	ld	r4, 16(r11)		/* parent's saved LR */

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)		/* dereference the pointer variable */
	ld	r5,0(r5)		/* function descriptor -> entry point */
	mtctr	r5
	bctrl				/* tracer(ip, parent_ip) */
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */
1230
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Graph-tracer hook, entered from the _mcount/ftrace_caller frame
 * above (hence the fixed 112/128 offsets).  Passes the address of the
 * parent's saved-LR slot (r3) and the traced function's address (r4)
 * to prepare_ftrace_return, which may redirect the return path.
 */
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16		/* &caller_frame->saved_LR */

	bl	prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr
1248
/*
 * Traced functions "return" here when the graph tracer hijacked their
 * return address.  Preserve the function's return values (r3/r4),
 * ask ftrace_return_to_handler for the real return address, and jump
 * back to it.
 */
_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1			/* r31 = original SP, restored via ld r1,0(r1) */
	stdu	r1, -112(r1)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)		/* pop our frame */
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
1270
/*
 * Same as return_to_handler, but for returns out of module code:
 * the module's TOC (r2) must also be preserved and the core kernel's
 * TOC installed before calling into ftrace_return_to_handler.
 */
_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)		/* pop our frame */
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)		/* back to the module's TOC */
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */