/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *   Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *   Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
20
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100037
/*
 * System calls.
 */

/* TOC entries used by the syscall path below. */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7
51
#undef SHOW_SYSCALLS

/*
 * System call entry point.
 *
 * Register contents on entry, as demonstrated by the stores below:
 *   r0       = syscall number (indexes sys_call_table)
 *   r3-r8    = syscall arguments (saved to GPR3..GPR8)
 *   r9       = caller's r13 image (saved to the GPR13 frame slot)
 *   r11, r12 = NIP and MSR of the interrupted context
 *   r13      = PACA pointer (PACAKSAVE/PACATOC/PACAKMSR read from it)
 * NOTE(review): this prolog convention is established by the exception
 * vector code outside this file — confirm against exceptions-64s.S.
 */
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR		/* from user mode? */
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)	/* user: switch to the kernel stack */
1:	std	r10,0(r1)		/* back-chain to the old stack */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)		/* zero the unsaved volatile slots */
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01		/* trap number for a syscall */
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)		/* kernel TOC for the C calls below */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)			/* the C call clobbered the args */
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that condition
	 * is correct
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0			/* trap if soft-disabled here */
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne	syscall_dotrace		/* ptrace/audit wants a look first */
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32	/* zap high halves of the user args */
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* 16 bytes per table slot (64+32-bit) */
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */
180
/*
 * Common syscall return path: r3 holds the handler's return value.
 * Checks for exit work (trace/signals/restoreall), converts negative
 * errno returns into CR0.SO + positive errno, then RFIs back.
 */
syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11			/* in the -errno range? */
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	HMT_MEDIUM_LOW_HAS_PPR
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */
242
/* Negative return in the errno range: flag the error and negate r3. */
syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3		/* return positive errno to userland */
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000248
Paul Mackerras9994a332005-10-10 22:36:14 +1000249/* Traced system call support */
250syscall_dotrace:
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100251 bl save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000252 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100253 bl do_syscall_trace_enter
Roland McGrath4f72c422008-07-27 16:51:03 +1000254 /*
255 * Restore argument registers possibly just changed.
256 * We use the return value of do_syscall_trace_enter
257 * for the call number to look up in the table (r0).
258 */
259 mr r0,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000260 ld r3,GPR3(r1)
261 ld r4,GPR4(r1)
262 ld r5,GPR5(r1)
263 ld r6,GPR6(r1)
264 ld r7,GPR7(r1)
265 ld r8,GPR8(r1)
266 addi r9,r1,STACK_FRAME_OVERHEAD
Stuart Yoder9778b692012-07-05 04:41:35 +0000267 CURRENT_THREAD_INFO(r10, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000268 ld r10,TI_FLAGS(r10)
Anton Blanchardd14299d2012-04-04 18:23:27 +0000269 b .Lsyscall_dotrace_cont
Paul Mackerras9994a332005-10-10 22:36:14 +1000270
/* Out-of-range syscall number: fail with ENOSYS. */
syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
274
/*
 * Slow syscall-exit path, reached when any of the TIF work bits were
 * set: handles TIF_RESTOREALL/TIF_NOERROR, clears per-syscall flags,
 * and runs exit tracing.  On entry r9 = TI_FLAGS, r11 = -_LAST_ERRNO,
 * r12 = thread_info pointer (all set up by syscall_exit above).
 */
syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12	/* atomic update of TI_FLAGS */
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000326
/* Save non-volatile GPRs, if not already saved.  The low bit of the
 * _TRAP word records whether r14-r31 are already in the frame. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-			/* already saved: just return */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear the "unsaved" marker bit */
	std	r0,_TRAP(r1)
	blr
336
David Woodhouse401d1f02005-11-15 18:52:18 +0000337
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	syscall_exit
351
/* vfork wrapper: save nvgprs for the child's register image. */
_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	syscall_exit
356
/* clone wrapper: save nvgprs for the child's register image. */
_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	syscall_exit
361
/* 32-bit swapcontext: full register state must be on the stack. */
_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	syscall_exit
366
/* 64-bit swapcontext: full register state must be on the stack. */
_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	syscall_exit
371
/* First return of a new child task: returns 0 to the child. */
_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
377
/* First schedule of a kernel thread: call fn(arg) where copy_thread
 * left fn in r14 and arg in r15, then exit via syscall_exit. */
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#if defined(_CALL_ELF) && _CALL_ELF == 2
	mr	r12,r14		/* ELFv2: callee expects its own address in r12 */
#endif
	blrl
	li	r3,0
	b	syscall_exit
389
/* TOC entry for the system-wide default DSCR value, used by _switch. */
	.section	".toc","aw"
DSCR_DEFAULT:
	.tc dscr_default[TC],dscr_default

	.section	".text"
395
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22	/* FP/VEC/VSX live in MSR? */
	beq+	1f
	andc	r22,r22,r0	/* turn them off across the switch */
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	mfspr	r0, SPRN_BESCR
	std	r0, THREAD_BESCR(r3)
	mfspr	r0, SPRN_EBBHR
	std	r0, THREAD_EBBHR(r3)
	mfspr	r0, SPRN_EBBRR
	std	r0, THREAD_EBBRR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	ld	r0, THREAD_BESCR(r4)
	mtspr	SPRN_BESCR, r0
	ld	r0, THREAD_EBBHR(r4)
	mtspr	SPRN_EBBHR, r0
	ld	r0, THREAD_EBBRR(r4)
	mtspr	SPRN_EBBRR, r0

	ld	r0,THREAD_TAR(r4)
	mtspr	SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r7,DSCR_DEFAULT@toc(2)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,0(r7)	/* not inherited: use the system default */
1:
BEGIN_FTR_SECTION_NESTED(70)
	mfspr	r8, SPRN_FSCR
	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
	cmpd	r0,r25		/* r25 = outgoing task's DSCR (saved above) */
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
611
612 .align 7
613_GLOBAL(ret_from_except)
614 ld r11,_TRAP(r1)
615 andi. r0,r11,1
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100616 bne ret_from_except_lite
Paul Mackerras9994a332005-10-10 22:36:14 +1000617 REST_NVGPRS(r1)
618
_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR	/* returning to kernel? */
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	beq	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite	/* re-check flags after scheduling */
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	bl	restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume	/* signals / notifier work */
	b	ret_from_except
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000683
/*
 * Return-to-kernel path: complete any emulated stack store (stwu
 * single-stepped by kprobes), then handle kernel preemption.
 * On entry r4 = TI_FLAGS, r9 = thread_info pointer.
 */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000756
/*
 * restore / fast_exc_return_irq: the common interrupt-exit path.
 * Compares the saved soft-enable state (SOFTE on the stack) with the
 * current PACA soft-enable byte and dispatches to one of:
 *  - restore_irq_off: returning with interrupts soft-disabled;
 *  - restore_check_irq_replay: an interrupt arrived while soft-disabled
 *    and must be replayed before we soft-enable;
 *  - do_restore / fast_exception_return: final register restore + rfid.
 */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100757 .globl fast_exc_return_irq
758fast_exc_return_irq:
Paul Mackerras9994a332005-10-10 22:36:14 +1000759restore:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100760 /*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000761 * This is the main kernel exit path. First we check if we
762 * are about to re-enable interrupts
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100763 */
Michael Ellerman01f3880d2008-07-16 14:21:34 +1000764 ld r5,SOFTE(r1)
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100765 lbz r6,PACASOFTIRQEN(r13)
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000766 cmpwi cr0,r5,0
767 beq restore_irq_off
Paul Mackerras9994a332005-10-10 22:36:14 +1000768
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000769 /* We are enabling, were we already enabled ? Yes, just return */
770 cmpwi cr0,r6,1
771 beq cr0,do_restore
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000772
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000773 /*
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100774 * We are about to soft-enable interrupts (we are hard disabled
775 * at this point). We check if there's anything that needs to
776 * be replayed first.
777 */
778 lbz r0,PACAIRQHAPPENED(r13)
779 cmpwi cr0,r0,0
780 bne- restore_check_irq_replay
781
782 /*
783 * Get here when nothing happened while soft-disabled, just
784 * soft-enable and move-on. We will hard-enable as a side
785 * effect of rfi
786 */
787restore_no_replay:
788 TRACE_ENABLE_INTS
789 li r0,1
790 stb r0,PACASOFTIRQEN(r13);
791
792 /*
793 * Final return path. BookE is handled in a different file
794 */
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000795do_restore:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000796#ifdef CONFIG_PPC_BOOK3E
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100797 b exception_return_book3e
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000798#else
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100799 /*
800 * Clear the reservation. If we know the CPU tracks the address of
801 * the reservation then we can potentially save some cycles and use
802 * a larx. On POWER6 and POWER7 this is significantly faster.
803 */
804BEGIN_FTR_SECTION
805 stdcx. r0,0,r1 /* to clear the reservation */
806FTR_SECTION_ELSE
807 ldarx r4,0,r1
808ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
809
810 /*
811 * Some code path such as load_up_fpu or altivec return directly
812 * here. They run entirely hard disabled and do not alter the
813 * interrupt state. They also don't use lwarx/stwcx. and thus
814 * are known not to leave dangling reservations.
815 */
816 .globl fast_exception_return
817fast_exception_return:
/* r3 = saved MSR of the interrupted context; tested below for MSR_RI and MSR_PR */
818 ld r3,_MSR(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100819 ld r4,_CTR(r1)
820 ld r0,_LINK(r1)
821 mtctr r4
822 mtlr r0
823 ld r4,_XER(r1)
824 mtspr SPRN_XER,r4
825
826 REST_8GPRS(5, r1)
827
828 andi. r0,r3,MSR_RI
829 beq- unrecov_restore
830
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100831 /* Load PPR from thread struct before we clear MSR:RI */
832BEGIN_FTR_SECTION
833 ld r2,PACACURRENT(r13)
834 ld r2,TASKTHREADPPR(r2)
835END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
836
Anton Blanchardf89451f2010-08-11 01:40:27 +0000837 /*
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100838 * Clear RI before restoring r13. If we are returning to
839 * userspace and we take an exception after restoring r13,
840 * we end up corrupting the userspace r13 value.
841 */
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100842 ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
843 andc r4,r4,r0 /* r0 contains MSR_RI here */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100844 mtmsrd r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000845
Michael Neulingafc07702013-02-13 16:21:34 +0000846#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
847 /* TM debug */
848 std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
849#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000850 /*
851 * r13 is our per cpu area, only restore it if we are returning to
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100852 * userspace the value stored in the stack frame may belong to
853 * another CPU.
Paul Mackerras9994a332005-10-10 22:36:14 +1000854 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100855 andi. r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000856 beq 1f
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100857BEGIN_FTR_SECTION
858 mtspr SPRN_PPR,r2 /* Restore PPR */
859END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100860 ACCOUNT_CPU_USER_EXIT(r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000861 REST_GPR(13, r1)
8621:
/* SRR1 = interrupted MSR; SRR0 (set below) = interrupted NIP; rfid consumes both */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100863 mtspr SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000864
865 ld r2,_CCR(r1)
866 mtcrf 0xFF,r2
867 ld r2,_NIP(r1)
868 mtspr SPRN_SRR0,r2
869
870 ld r0,GPR0(r1)
871 ld r2,GPR2(r1)
872 ld r3,GPR3(r1)
873 ld r4,GPR4(r1)
874 ld r1,GPR1(r1)
875
876 rfid
877 b . /* prevent speculative execution */
878
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000879#endif /* CONFIG_PPC_BOOK3E */
880
/*
 * restore_irq_off: exit while leaving interrupts soft-disabled.
 * If the MSR we are returning with has EE set (we will hard-enable),
 * clear PACA_IRQ_HARD_DIS so the PACA bookkeeping stays coherent with
 * the hardware state; then mark soft-disabled and finish via do_restore.
 */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100881 /*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000882 * We are returning to a context with interrupts soft disabled.
883 *
884 * However, we may also about to hard enable, so we need to
885 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
886 * or that bit can get out of sync and bad things will happen
887 */
888restore_irq_off:
889 ld r3,_MSR(r1)
890 lbz r7,PACAIRQHAPPENED(r13)
891 andi. r0,r3,MSR_EE
892 beq 1f
893 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
894 stb r7,PACAIRQHAPPENED(r13)
8951: li r0,0
896 stb r0,PACASOFTIRQEN(r13);
897 TRACE_DISABLE_INTS
898 b do_restore
899
/*
 * restore_check_irq_replay: an interrupt fired while we were
 * soft-disabled.  __check_irq_replay() returns in r3 the vector of an
 * interrupt that must be replayed, or 0 if nothing is pending.  For a
 * replay we reuse the current exception frame: rewrite _TRAP (keeping
 * its low nibble) and branch to the matching handler --
 * 0x500 external, 0x900 decrementer, 0x280/0xe80/0xa00 doorbells --
 * all with interrupts still soft-disabled.
 */
900 /*
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100901 * Something did happen, check if a re-emit is needed
902 * (this also clears paca->irq_happened)
903 */
904restore_check_irq_replay:
905 /* XXX: We could implement a fast path here where we check
906 * for irq_happened being just 0x01, in which case we can
907 * clear it and return. That means that we would potentially
908 * miss a decrementer having wrapped all the way around.
909 *
910 * Still, this might be useful for things like hash_page
911 */
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100912 bl __check_irq_replay
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100913 cmpwi cr0,r3,0
914 beq restore_no_replay
915
916 /*
917 * We need to re-emit an interrupt. We do so by re-using our
918 * existing exception frame. We first change the trap value,
919 * but we need to ensure we preserve the low nibble of it
920 */
921 ld r4,_TRAP(r1)
922 clrldi r4,r4,60
923 or r4,r4,r3
924 std r4,_TRAP(r1)
925
926 /*
927 * Then find the right handler and call it. Interrupts are
928 * still soft-disabled and we keep them that way.
929 */
930 cmpwi cr0,r3,0x500
931 bne 1f
932 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100933 bl do_IRQ
934 b ret_from_except
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +11009351: cmpwi cr0,r3,0x900
936 bne 1f
937 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100938 bl timer_interrupt
939 b ret_from_except
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000940#ifdef CONFIG_PPC_DOORBELL
9411:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100942#ifdef CONFIG_PPC_BOOK3E
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000943 cmpwi cr0,r3,0x280
944#else
945 BEGIN_FTR_SECTION
946 cmpwi cr0,r3,0xe80
947 FTR_SECTION_ELSE
948 cmpwi cr0,r3,0xa00
949 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
950#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100951 bne 1f
952 addi r3,r1,STACK_FRAME_OVERHEAD;
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100953 bl doorbell_exception
954 b ret_from_except
Ian Munsiefe9e1d52012-11-14 18:49:48 +0000955#endif /* CONFIG_PPC_DOORBELL */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11009561: b ret_from_except /* What else to do here ? */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100957
/*
 * unrecov_restore: the saved MSR had RI (recoverable interrupt) clear,
 * so machine state cannot be trusted.  Report via
 * unrecoverable_exception() and spin (the branch loops back here).
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000958unrecov_restore:
959 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100960 bl unrecoverable_exception
Paul Mackerras9994a332005-10-10 22:36:14 +1000961 b unrecov_restore
962
/*
 * enter_rtas: transition from the 64-bit MMU-on kernel into RTAS, which
 * runs in 32-bit real mode.  Saves everything RTAS may clobber (TOC,
 * paca, non-volatiles, CR/CTR/XER/DAR/DSISR) on the stack, stashes r1
 * and the caller's MSR in the PACA, builds a real-mode MSR, and rfids
 * into rtas->entry.  Control comes back at rtas_return_loc below.
 */
963#ifdef CONFIG_PPC_RTAS
964/*
965 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
966 * called with the MMU off.
967 *
968 * In addition, we need to be in 32b mode, at least for now.
969 *
970 * Note: r3 is an input parameter to rtas, so don't trash it...
971 */
972_GLOBAL(enter_rtas)
973 mflr r0
974 std r0,16(r1)
975 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
976
977 /* Because RTAS is running in 32b mode, it clobbers the high order half
978 * of all registers that it saves. We therefore save those registers
979 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
980 */
981 SAVE_GPR(2, r1) /* Save the TOC */
982 SAVE_GPR(13, r1) /* Save paca */
983 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
984 SAVE_10GPRS(22, r1) /* ditto */
985
986 mfcr r4
987 std r4,_CCR(r1)
988 mfctr r5
989 std r5,_CTR(r1)
990 mfspr r6,SPRN_XER
991 std r6,_XER(r1)
992 mfdar r7
993 std r7,_DAR(r1)
994 mfdsisr r8
995 std r8,_DSISR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000996
Mike Kravetz9fe901d2006-03-27 15:20:00 -0800997 /* Temporary workaround to clear CR until RTAS can be modified to
998 * ignore all bits.
999 */
1000 li r0,0
1001 mtcr r0
1002
David Woodhouse007d88d2007-01-01 18:45:34 +00001003#ifdef CONFIG_BUG
Paul Mackerras9994a332005-10-10 22:36:14 +10001004 /* There is no way it is acceptable to get here with interrupts enabled,
1005 * check it with the asm equivalent of WARN_ON
1006 */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +10001007 lbz r0,PACASOFTIRQEN(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +100010081: tdnei r0,0
David Woodhouse007d88d2007-01-01 18:45:34 +00001009 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1010#endif
1011
Paul Mackerrasd04c56f2006-10-04 16:47:49 +10001012 /* Hard-disable interrupts */
1013 mfmsr r6
1014 rldicl r7,r6,48,1
1015 rotldi r7,r7,16
1016 mtmsrd r7,1
1017
Paul Mackerras9994a332005-10-10 22:36:14 +10001018 /* Unfortunately, the stack pointer and the MSR are also clobbered,
1019 * so they are saved in the PACA which allows us to restore
1020 * our original state after RTAS returns.
1021 */
1022 std r1,PACAR1(r13)
1023 std r6,PACASAVEDMSR(r13)
1024
1025 /* Setup our real return addr */
Anton Blanchardad0289e2014-02-04 16:04:52 +11001026 LOAD_REG_ADDR(r4,rtas_return_loc)
David Gibsone58c3492006-01-13 14:56:25 +11001027 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +10001028 mtlr r4
1029
/* r0 = caller MSR with EE/SE/BE/RI cleared -- the MSR we switch to below so
 * SRR0/SRR1 can be set safely.  r6 additionally drops SF, IR, DR, FP, FE0/1
 * and LE, yielding the 32-bit big-endian real-mode MSR RTAS itself runs with. */
1030 li r0,0
1031 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1032 andc r0,r6,r0
1033
1034 li r9,1
1035 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001036 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
Paul Mackerras9994a332005-10-10 22:36:14 +10001037 andc r6,r0,r9
Paul Mackerras9994a332005-10-10 22:36:14 +10001038 sync /* disable interrupts so SRR0/1 */
1039 mtmsrd r0 /* don't get trashed */
1040
David Gibsone58c3492006-01-13 14:56:25 +11001041 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +10001042 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
1043 ld r4,RTASBASE(r4) /* get the rtas->base value */
1044
1045 mtspr SPRN_SRR0,r5
1046 mtspr SPRN_SRR1,r6
1047 rfid
1048 b . /* prevent speculative execution */
1049
/*
 * rtas_return_loc: first kernel code executed after RTAS returns.
 * Still in real mode (relocation off): fix endianness, locate the PACA
 * (masked to a real-mode address), clear MSR_RI, then rfid to
 * rtas_restore_regs using the stack pointer and MSR that enter_rtas
 * saved in the PACA.  The .llong at 1: holds the virtual address of
 * rtas_restore_regs, fetched PC-relatively since relocation is off.
 */
Anton Blanchardad0289e2014-02-04 16:04:52 +11001050rtas_return_loc:
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001051 FIXUP_ENDIAN
1052
Paul Mackerras9994a332005-10-10 22:36:14 +10001053 /* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +11001054 GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +11001055 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +10001056
Paul Mackerrase31aa452008-08-30 11:41:12 +10001057 bcl 20,31,$+4
10580: mflr r3
Anton Blanchardad0289e2014-02-04 16:04:52 +11001059 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
Paul Mackerrase31aa452008-08-30 11:41:12 +10001060
Paul Mackerras9994a332005-10-10 22:36:14 +10001061 mfmsr r6
1062 li r0,MSR_RI
1063 andc r6,r6,r0
1064 sync
1065 mtmsrd r6
1066
1067 ld r1,PACAR1(r4) /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +10001068 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
1069
1070 mtspr SPRN_SRR0,r3
1071 mtspr SPRN_SRR1,r4
1072 rfid
1073 b . /* prevent speculative execution */
1074
Paul Mackerrase31aa452008-08-30 11:41:12 +10001075 .align 3
Anton Blanchardad0289e2014-02-04 16:04:52 +110010761: .llong rtas_restore_regs
Paul Mackerrase31aa452008-08-30 11:41:12 +10001077
/*
 * rtas_restore_regs: relocation is back on.  Undo everything enter_rtas
 * saved -- TOC, paca pointer, non-volatile GPRs, CR/CTR/XER/DAR/DSISR --
 * then pop the RTAS stack frame and return to enter_rtas's caller.
 */
Anton Blanchardad0289e2014-02-04 16:04:52 +11001078rtas_restore_regs:
Paul Mackerras9994a332005-10-10 22:36:14 +10001079 /* relocation is on at this point */
1080 REST_GPR(2, r1) /* Restore the TOC */
1081 REST_GPR(13, r1) /* Restore paca */
1082 REST_8GPRS(14, r1) /* Restore the non-volatiles */
1083 REST_10GPRS(22, r1) /* ditto */
1084
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +11001085 GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +10001086
1087 ld r4,_CCR(r1)
1088 mtcr r4
1089 ld r5,_CTR(r1)
1090 mtctr r5
1091 ld r6,_XER(r1)
1092 mtspr SPRN_XER,r6
1093 ld r7,_DAR(r1)
1094 mtdar r7
1095 ld r8,_DSISR(r1)
1096 mtdsisr r8
Paul Mackerras9994a332005-10-10 22:36:14 +10001097
1098 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
1099 ld r0,16(r1) /* get return address */
1100
1101 mtlr r0
1102 blr /* return to caller */
1103
1104#endif /* CONFIG_PPC_RTAS */
1105
/*
 * enter_prom: call into Open Firmware.  r4 = PROM entry point (moved
 * into SRR0 below).  NOTE(review): r3 is left untouched here and is
 * presumably the PROM argument buffer, analogous to enter_rtas -- confirm
 * against callers.  Saves registers OF may clobber, switches to a 32-bit
 * big-endian MSR via rfi(d), and resumes at trampoline label 1: when OF
 * returns, restoring MSR and saved registers.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +10001106_GLOBAL(enter_prom)
1107 mflr r0
1108 std r0,16(r1)
1109 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
1110
1111 /* Because PROM is running in 32b mode, it clobbers the high order half
1112 * of all registers that it saves. We therefore save those registers
1113 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
1114 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001115 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001116 SAVE_GPR(13, r1)
1117 SAVE_8GPRS(14, r1)
1118 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001119 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +10001120 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001121 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001122 std r11,_MSR(r1)
1123
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001124 /* Put PROM address in SRR0 */
1125 mtsrr0 r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001126
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001127 /* Setup our trampoline return addr in LR */
1128 bcl 20,31,$+4
11290: mflr r4
1130 addi r4,r4,(1f - 0b)
1131 mtlr r4
1132
1133 /* Prepare a 32-bit mode big endian MSR
Paul Mackerras9994a332005-10-10 22:36:14 +10001134 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001135#ifdef CONFIG_PPC_BOOK3E
/* Book3E: just clear the computation-mode bit (bit 1) of the current MSR */
1136 rlwinm r11,r11,0,1,31
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001137 mtsrr1 r11
1138 rfi
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001139#else /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001140 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1141 andc r11,r11,r12
1142 mtsrr1 r11
1143 rfid
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001144#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +10001145
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +100011461: /* Return from OF */
1147 FIXUP_ENDIAN
Paul Mackerras9994a332005-10-10 22:36:14 +10001148
1149 /* Just make sure that r1 top 32 bits didn't get
1150 * corrupt by OF
1151 */
1152 rldicl r1,r1,0,32
1153
1154 /* Restore the MSR (back to 64 bits) */
1155 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001156 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +10001157 isync
1158
1159 /* Restore other registers */
1160 REST_GPR(2, r1)
1161 REST_GPR(13, r1)
1162 REST_8GPRS(14, r1)
1163 REST_10GPRS(22, r1)
1164 ld r4,_CCR(r1)
1165 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001166
1167 addi r1,r1,PROM_FRAME_SIZE
1168 ld r0,16(r1)
1169 mtlr r0
1170 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001171
/*
 * Function tracer entry points (mcount / _mcount / ftrace_caller).
 *
 * With CONFIG_DYNAMIC_FTRACE, the compiler-inserted _mcount is a bare
 * blr; call sites are patched at runtime to reach ftrace_caller, whose
 * "bl ftrace_stub" at the exported ftrace_call label is presumably
 * itself live-patched to the active tracer -- confirm against the
 * kernel ftrace-design documentation.  Without it, _mcount loads
 * ftrace_trace_function (double dereference: address of the pointer,
 * then the function descriptor/pointer itself) and calls it via CTR.
 *
 * Frame layout in both variants: stdu r1,-112(r1); the traced
 * function's LR (the mcount return site) is stashed at 128(r1), and
 * the parent's LR is read from 16(r11) of the caller's frame; r3 is
 * the call-site address minus MCOUNT_INSN_SIZE, r4 the parent address.
 */
Steven Rostedt606576c2008-10-06 19:06:12 -04001172#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -04001173#ifdef CONFIG_DYNAMIC_FTRACE
1174_GLOBAL(mcount)
1175_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001176 blr
1177
1178_GLOBAL(ftrace_caller)
1179 /* Taken from output of objdump from lib64/glibc */
1180 mflr r3
1181 ld r11, 0(r1)
1182 stdu r1, -112(r1)
1183 std r3, 128(r1)
1184 ld r4, 16(r11)
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301185 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001186.globl ftrace_call
1187ftrace_call:
1188 bl ftrace_stub
1189 nop
Steven Rostedt46542882009-02-10 22:19:54 -08001190#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1191.globl ftrace_graph_call
1192ftrace_graph_call:
1193 b ftrace_graph_stub
1194_GLOBAL(ftrace_graph_stub)
1195#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001196 ld r0, 128(r1)
1197 mtlr r0
1198 addi r1, r1, 112
1199_GLOBAL(ftrace_stub)
1200 blr
1201#else
1202_GLOBAL(mcount)
1203 blr
1204
1205_GLOBAL(_mcount)
1206 /* Taken from output of objdump from lib64/glibc */
1207 mflr r3
1208 ld r11, 0(r1)
1209 stdu r1, -112(r1)
1210 std r3, 128(r1)
1211 ld r4, 16(r11)
1212
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301213 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001214 LOAD_REG_ADDR(r5,ftrace_trace_function)
1215 ld r5,0(r5)
1216 ld r5,0(r5)
1217 mtctr r5
1218 bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001219 nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001220
1221
1222#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1223 b ftrace_graph_caller
1224#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001225 ld r0, 128(r1)
1226 mtlr r0
1227 addi r1, r1, 112
1228_GLOBAL(ftrace_stub)
1229 blr
1230
Steven Rostedt6794c782009-02-09 21:10:27 -08001231#endif /* CONFIG_DYNAMIC_FTRACE */
1232
/*
 * ftrace_graph_caller: tail of the mcount trampoline for the function
 * graph tracer.  r4 = traced function address (saved call-site LR from
 * 128(r1) minus MCOUNT_INSN_SIZE); r3 = address of the parent-LR save
 * slot (16(r11)) so prepare_ftrace_return() can redirect the return to
 * return_to_handler.
 */
1233#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt46542882009-02-10 22:19:54 -08001234_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001235 /* load r4 with local address */
1236 ld r4, 128(r1)
1237 subi r4, r4, MCOUNT_INSN_SIZE
1238
1239 /* get the parent address */
1240 ld r11, 112(r1)
1241 addi r3, r11, 16
1242
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001243 bl prepare_ftrace_return
Steven Rostedt6794c782009-02-09 21:10:27 -08001244 nop
1245
1246 ld r0, 128(r1)
1247 mtlr r0
1248 addi r1, r1, 112
1249 blr
1250
/*
 * return_to_handler: reached instead of the real caller when the graph
 * tracer hooked a function's return.  Preserves the return values
 * (r3/r4) and r31 (used as a frame anchor) below the stack pointer,
 * asks ftrace_return_to_handler() for the real return address, and
 * branches there via LR after restoring the saved values.
 */
1251_GLOBAL(return_to_handler)
1252 /* need to save return values */
Steven Rostedtbb725342009-02-11 12:45:49 -08001253 std r4, -24(r1)
1254 std r3, -16(r1)
1255 std r31, -8(r1)
1256 mr r31, r1
1257 stdu r1, -112(r1)
1258
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001259 bl ftrace_return_to_handler
Steven Rostedtbb725342009-02-11 12:45:49 -08001260 nop
1261
1262 /* return value has real return address */
1263 mtlr r3
1264
1265 ld r1, 0(r1)
1266 ld r4, -24(r1)
1267 ld r3, -16(r1)
1268 ld r31, -8(r1)
1269
1270 /* Jump back to real return address */
1271 blr
1272
/*
 * mod_return_to_handler: like return_to_handler, but for returns from
 * module code.  Additionally saves the module's TOC (r2) and switches
 * to the core-kernel TOC from the PACA before calling
 * ftrace_return_to_handler(), restoring the module TOC on the way out.
 */
1273_GLOBAL(mod_return_to_handler)
1274 /* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001275 std r4, -32(r1)
1276 std r3, -24(r1)
1277 /* save TOC */
1278 std r2, -16(r1)
1279 std r31, -8(r1)
1280 mr r31, r1
1281 stdu r1, -112(r1)
1282
Steven Rostedtbb725342009-02-11 12:45:49 -08001283 /*
1284 * We are in a module using the module's TOC.
1285 * Switch to our TOC to run inside the core kernel.
1286 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001287 ld r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001288
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001289 bl ftrace_return_to_handler
Steven Rostedt6794c782009-02-09 21:10:27 -08001290 nop
1291
1292 /* return value has real return address */
1293 mtlr r3
1294
1295 ld r1, 0(r1)
1296 ld r4, -32(r1)
1297 ld r3, -24(r1)
1298 ld r2, -16(r1)
1299 ld r31, -8(r1)
1300
1301 /* Jump back to real return address */
1302 blr
1303#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1304#endif /* CONFIG_FUNCTION_TRACER */