/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
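	/*
	 * On entry from the system call vector: r0 holds the syscall
	 * number, r13 the PACA, r9 the saved user r13, r11 = SRR0 (the
	 * return NIP) and r12 = SRR1 (the caller's MSR).  If we came from
	 * the kernel we keep using the current stack; otherwise we switch
	 * to the kernel stack saved in PACAKSAVE.
	 */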
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
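	/*
	 * 0xc00 is the system call vector; the low bit of the trap value
	 * records that the non-volatile GPRs have not been saved in this
	 * frame (see save_nvgprs below).
	 */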
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
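	/*
	 * Record interrupts as both soft- and hard-enabled in the PACA
	 * and in the frame (SOFTE); MSR_EE itself is set a few
	 * instructions below, once the iSeries soft-enable hack has run.
	 */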
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
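	/*
	 * thread_info sits at the base of the kernel stack, so clearing
	 * the low THREAD_SHIFT bits of the stack pointer gives the
	 * current_thread_info() address.
	 */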
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default (64-bit) sys_call_table here,
 * based on the caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
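	/*
	 * Each sys_call_table slot holds a 64-bit entry point followed by
	 * its 32-bit entry point (16 bytes per syscall), hence the shift
	 * by 4 below and the +8 offset used for 32-bit tasks above.
	 */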
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
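	/*
	 * The rldicl/rotldi pair below clears MSR_EE without needing a
	 * mask constant: rotating left by 48 brings the EE bit to bit 0,
	 * the rldicl mask drops it, and rotating left by another 16 puts
	 * everything else back where it was.
	 */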
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
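	/* Clear the low bit of the trap value to record that the
	 * non-volatile GPRs are now saved in this frame.
	 */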
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
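	/*
	 * If FP, VMX or VSX is live in the current MSR, turn it off
	 * across the switch; the next task that touches those registers
	 * takes an unavailable exception and reloads its own state.
	 */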
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	iseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

#ifdef CONFIG_PERF_COUNTERS
	/* check paca->perf_counter_pending if we're enabling ints */
	lbz	r3,PACAPERFPEND(r13)
	and.	r3,r3,r5
	beq	27f
	bl	.perf_counter_do_pending
27:
#endif /* CONFIG_PERF_COUNTERS */

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

iseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
#endif

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

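	/* Build the MSR that RTAS will run with: 32-bit (SF off), MMU off
	 * (IR/DR clear), floating point disabled, RI set, EE off.
	 */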
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync			/* disable interrupts so SRR0/1 */
	mtmsrd	r0		/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3	/* Get PACA */
	clrldi	r4,r4,2		/* convert to realmode address */

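	/* Pick up the current address with a branch-and-link so the
	 * linked address of rtas_restore_regs can be computed while
	 * running with relocation off; the bcl 20,31 form avoids
	 * disturbing the link stack predictor.
	 */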
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)	/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode
	 */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
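	/* On entry LR holds the return address into the traced function,
	 * just past its call to _mcount.  Set up the tracer arguments:
	 * r3 = address of the mcount call site, r4 = the traced
	 * function's own return address (its parent).
	 */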
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif