/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
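/*
 * NAPPING_CEDE marks a thread that napped because its vcpu executed
 * H_CEDE; NAPPING_NOVCPU marks a thread that napped because it had no
 * vcpu to run (see kvmppc_primary_no_guest below).
 */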

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
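	/*
	 * Drop to real mode: clear RI, load SRR0/1 with the target
	 * address and an MSR value that has IR and DR cleared, and
	 * RFI so that kvmppc_call_hv_entry runs with the MMU off.
	 */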
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
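	/*
	 * On CPUs with the PMAO bug, a pending performance monitor
	 * alert (MMCR0_PMAO set without MMCR0_PMAO_SYNC) has to be
	 * re-posted by kvmppc_fix_pmao before the counters are
	 * reloaded; the feature section below handles that.
	 */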
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
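	/* atomically set our thread's bit using a lwarx/stwcx. loop */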
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r1, SPRN_CTRLF
	ori	r1, r1, 1
	mtspr	SPRN_CTRLT, r1

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	/* Clear the runlatch bit before napping */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

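	/*
	 * Program LPCR so that (with PECE0 set and PECE1 cleared, as
	 * done here) only external interrupts such as IPIs can wake
	 * the thread, then use the store/ptesync/load/compare idiom
	 * to make prior stores globally visible before executing nap.
	 */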
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

	.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
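	/*
	 * The entry count lives in the low bits of the entry_exit
	 * word; a value >= 0x100 means at least one thread has begun
	 * exiting, in which case we are too late to enter the guest.
	 */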
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(r2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
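	/*
	 * The repeated reads back of HID0 below follow the (assumed)
	 * PPC970 requirement that a move to HID0 be serialized by
	 * several mfspr HID0 instructions before it takes effect.
	 */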
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
31:
	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
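	/* lppaca fields are big-endian; use the byte-swapping
	 * accessors so this stays correct on little-endian hosts */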
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

BEGIN_FTR_SECTION
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION_NESTED(89)
	isync
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */
	/* Make sure the failure summary is set; otherwise we'll program
	 * check when we trechkpt.  It's possible that this might not have
	 * been set on a kvmppc_set_one_reg() call, but we shouldn't let
	 * that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 or PPC970 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV, with ME forced on */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
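	/*
	 * Vector numbers for interrupts delivered via HSRR0/1 are
	 * encoded in r12 with bit 1 set, so test that bit to decide
	 * whether to read the HSRRs, and clear it again before the
	 * trap number is stored.
	 */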
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now
	 */
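	/* a positive return from kvmppc_read_intr means the interrupt
	 * is for the host, so head out to the host in that case */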
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
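	/* r6 = SLB entry index, r5 counts the valid entries saved */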
| 1204 | 1: slbmfee r8,r6 |
| 1205 | andis. r0,r8,SLB_ESID_V@h |
| 1206 | beq 2f |
| 1207 | add r8,r8,r6 /* put index in */ |
| 1208 | slbmfev r3,r6 |
| 1209 | std r8,VCPU_SLB_E(r7) |
| 1210 | std r3,VCPU_SLB_V(r7) |
| 1211 | addi r7,r7,VCPU_SLB_SIZE |
| 1212 | addi r5,r5,1 |
| 1213 | 2: addi r6,r6,1 |
| 1214 | bdnz 1b |
| 1215 | stw r5,VCPU_SLB_MAX(r9) |
| 1216 | |
| 1217 | /* |
| 1218 | * Save the guest PURR/SPURR |
| 1219 | */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1220 | BEGIN_FTR_SECTION |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1221 | mfspr r5,SPRN_PURR |
| 1222 | mfspr r6,SPRN_SPURR |
| 1223 | ld r7,VCPU_PURR(r9) |
| 1224 | ld r8,VCPU_SPURR(r9) |
| 1225 | std r5,VCPU_PURR(r9) |
| 1226 | std r6,VCPU_SPURR(r9) |
| 1227 | subf r5,r7,r5 |
| 1228 | subf r6,r8,r6 |
| 1229 | |
| 1230 | /* |
| 1231 | * Restore host PURR/SPURR and add guest times |
| 1232 | * so that the time in the guest gets accounted. |
| 1233 | */ |
| 1234 | ld r3,HSTATE_PURR(r13) |
| 1235 | ld r4,HSTATE_SPURR(r13) |
| 1236 | add r3,r3,r5 |
| 1237 | add r4,r4,r6 |
| 1238 | mtspr SPRN_PURR,r3 |
| 1239 | mtspr SPRN_SPURR,r4 |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1240 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1241 | |
Paul Mackerras | 93b0f4d | 2013-09-06 13:17:46 +1000 | [diff] [blame] | 1242 | /* Save DEC */ |
| 1243 | mfspr r5,SPRN_DEC |
| 1244 | mftb r6 |
| 1245 | extsw r5,r5 |
| 1246 | add r5,r5,r6 |
Paul Mackerras | c5fb80d | 2014-03-25 10:47:07 +1100 | [diff] [blame] | 1247 | /* r5 is a guest timebase value here, convert to host TB */ |
| 1248 | ld r3,HSTATE_KVM_VCORE(r13) |
| 1249 | ld r4,VCORE_TB_OFFSET(r3) |
| 1250 | subf r5,r4,r5 |
Paul Mackerras | 93b0f4d | 2013-09-06 13:17:46 +1000 | [diff] [blame] | 1251 | std r5,VCPU_DEC_EXPIRES(r9) |
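|      | /*
|      | * Worked example: DEC counts down, so the guest-TB time at which
|      | * it reaches zero is current TB + sign-extended DEC.  If DEC =
|      | * 0x1000 at guest timebase T and vcore->tb_offset = D, the value
|      | * saved is (T + 0x1000) - D, i.e. the expiry in host timebase,
|      | * matching the add/subf sequence above.
|      | */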
| 1252 | |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1253 | BEGIN_FTR_SECTION |
| 1254 | b 8f |
| 1255 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1256 | /* Save POWER8-specific registers */ |
| 1257 | mfspr r5, SPRN_IAMR |
| 1258 | mfspr r6, SPRN_PSPB |
| 1259 | mfspr r7, SPRN_FSCR |
| 1260 | std r5, VCPU_IAMR(r9) |
| 1261 | stw r6, VCPU_PSPB(r9) |
| 1262 | std r7, VCPU_FSCR(r9) |
| 1263 | mfspr r5, SPRN_IC |
| 1264 | mfspr r6, SPRN_VTB |
| 1265 | mfspr r7, SPRN_TAR |
| 1266 | std r5, VCPU_IC(r9) |
| 1267 | std r6, VCPU_VTB(r9) |
| 1268 | std r7, VCPU_TAR(r9) |
Michael Neuling | 7b49041 | 2014-01-08 21:25:32 +1100 | [diff] [blame] | 1269 | mfspr r8, SPRN_EBBHR |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1270 | std r8, VCPU_EBBHR(r9) |
| 1271 | mfspr r5, SPRN_EBBRR |
| 1272 | mfspr r6, SPRN_BESCR |
| 1273 | mfspr r7, SPRN_CSIGR |
| 1274 | mfspr r8, SPRN_TACR |
| 1275 | std r5, VCPU_EBBRR(r9) |
| 1276 | std r6, VCPU_BESCR(r9) |
| 1277 | std r7, VCPU_CSIGR(r9) |
| 1278 | std r8, VCPU_TACR(r9) |
| 1279 | mfspr r5, SPRN_TCSCR |
| 1280 | mfspr r6, SPRN_ACOP |
| 1281 | mfspr r7, SPRN_PID |
| 1282 | mfspr r8, SPRN_WORT |
| 1283 | std r5, VCPU_TCSCR(r9) |
| 1284 | std r6, VCPU_ACOP(r9) |
| 1285 | stw r7, VCPU_GUEST_PID(r9) |
| 1286 | std r8, VCPU_WORT(r9) |
| 1287 | 8: |
| 1288 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1289 | /* Save and reset AMR and UAMOR before turning on the MMU */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1290 | BEGIN_FTR_SECTION |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1291 | mfspr r5,SPRN_AMR |
| 1292 | mfspr r6,SPRN_UAMOR |
| 1293 | std r5,VCPU_AMR(r9) |
| 1294 | std r6,VCPU_UAMOR(r9) |
| 1295 | li r6,0 |
| 1296 | mtspr SPRN_AMR,r6 |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1297 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1298 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1299 | /* Switch DSCR back to host value */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1300 | BEGIN_FTR_SECTION |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1301 | mfspr r8, SPRN_DSCR |
| 1302 | ld r7, HSTATE_DSCR(r13) |
Paul Mackerras | cfc8602 | 2013-09-21 09:53:28 +1000 | [diff] [blame] | 1303 | std r8, VCPU_DSCR(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1304 | mtspr SPRN_DSCR, r7 |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1305 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1306 | |
| 1307 | /* Save non-volatile GPRs */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1308 | std r14, VCPU_GPR(R14)(r9) |
| 1309 | std r15, VCPU_GPR(R15)(r9) |
| 1310 | std r16, VCPU_GPR(R16)(r9) |
| 1311 | std r17, VCPU_GPR(R17)(r9) |
| 1312 | std r18, VCPU_GPR(R18)(r9) |
| 1313 | std r19, VCPU_GPR(R19)(r9) |
| 1314 | std r20, VCPU_GPR(R20)(r9) |
| 1315 | std r21, VCPU_GPR(R21)(r9) |
| 1316 | std r22, VCPU_GPR(R22)(r9) |
| 1317 | std r23, VCPU_GPR(R23)(r9) |
| 1318 | std r24, VCPU_GPR(R24)(r9) |
| 1319 | std r25, VCPU_GPR(R25)(r9) |
| 1320 | std r26, VCPU_GPR(R26)(r9) |
| 1321 | std r27, VCPU_GPR(R27)(r9) |
| 1322 | std r28, VCPU_GPR(R28)(r9) |
| 1323 | std r29, VCPU_GPR(R29)(r9) |
| 1324 | std r30, VCPU_GPR(R30)(r9) |
| 1325 | std r31, VCPU_GPR(R31)(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1326 | |
| 1327 | /* Save SPRGs */ |
| 1328 | mfspr r3, SPRN_SPRG0 |
| 1329 | mfspr r4, SPRN_SPRG1 |
| 1330 | mfspr r5, SPRN_SPRG2 |
| 1331 | mfspr r6, SPRN_SPRG3 |
| 1332 | std r3, VCPU_SPRG0(r9) |
| 1333 | std r4, VCPU_SPRG1(r9) |
| 1334 | std r5, VCPU_SPRG2(r9) |
| 1335 | std r6, VCPU_SPRG3(r9) |
| 1336 | |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 1337 | /* save FP state */ |
| 1338 | mr r3, r9 |
Paul Mackerras | 595e4f7 | 2013-10-15 20:43:04 +1100 | [diff] [blame] | 1339 | bl kvmppc_save_fp |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 1340 | |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1341 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 1342 | BEGIN_FTR_SECTION |
| 1343 | b 2f |
| 1344 | END_FTR_SECTION_IFCLR(CPU_FTR_TM) |
| 1345 | /* Turn on TM. */ |
| 1346 | mfmsr r8 |
| 1347 | li r0, 1 |
| 1348 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
| 1349 | mtmsrd r8 |
| 1350 | |
| 1351 | ld r5, VCPU_MSR(r9) |
| 1352 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 |
| 1353 | beq 1f /* TM not active in guest. */ |
| 1354 | |
| 1355 | li r3, TM_CAUSE_KVM_RESCHED |
| 1356 | |
| 1357 | /* Clear MSR[RI] since r1 and r13 are both going to be foobar. */
| 1358 | li r5, 0 |
| 1359 | mtmsrd r5, 1 |
| 1360 | |
| 1361 | /* All GPRs are volatile at this point. */ |
| 1362 | TRECLAIM(R3) |
| 1363 | |
| 1364 | /* Temporarily store r13 and r9 so we have some regs to play with */ |
| 1365 | SET_SCRATCH0(r13) |
| 1366 | GET_PACA(r13) |
| 1367 | std r9, PACATMSCRATCH(r13) |
| 1368 | ld r9, HSTATE_KVM_VCPU(r13) |
| 1369 | |
| 1370 | /* Get a few more GPRs free. */ |
| 1371 | std r29, VCPU_GPRS_TM(29)(r9) |
| 1372 | std r30, VCPU_GPRS_TM(30)(r9) |
| 1373 | std r31, VCPU_GPRS_TM(31)(r9) |
| 1374 | |
| 1375 | /* Save away PPR and DSCR soon so we don't run with user values. */
| 1376 | mfspr r31, SPRN_PPR |
| 1377 | HMT_MEDIUM |
| 1378 | mfspr r30, SPRN_DSCR |
| 1379 | ld r29, HSTATE_DSCR(r13) |
| 1380 | mtspr SPRN_DSCR, r29 |
| 1381 | |
| 1382 | /* Save all but r9, r13 & r29-r31 */ |
| 1383 | reg = 0 |
| 1384 | .rept 29 |
| 1385 | .if (reg != 9) && (reg != 13) |
| 1386 | std reg, VCPU_GPRS_TM(reg)(r9) |
| 1387 | .endif |
| 1388 | reg = reg + 1 |
| 1389 | .endr |
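|      | /*
|      | * The .rept block above is an assemble-time loop: it expands into
|      | * 27 std instructions covering r0-r28, with the .if skipping r9
|      | * and r13, which are still live as the vcpu and paca pointers.
|      | */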
| 1390 | /* ... now save r13 */ |
| 1391 | GET_SCRATCH0(r4) |
| 1392 | std r4, VCPU_GPRS_TM(13)(r9) |
| 1393 | /* ... and save r9 */ |
| 1394 | ld r4, PACATMSCRATCH(r13) |
| 1395 | std r4, VCPU_GPRS_TM(9)(r9) |
| 1396 | |
| 1397 | /* Reload stack pointer and TOC. */ |
| 1398 | ld r1, HSTATE_HOST_R1(r13) |
| 1399 | ld r2, PACATOC(r13) |
| 1400 | |
| 1401 | /* Set MSR RI now we have r1 and r13 back. */ |
| 1402 | li r5, MSR_RI |
| 1403 | mtmsrd r5, 1 |
| 1404 | |
| 1405 | /* Save away checkpointed SPRs. */
| 1406 | std r31, VCPU_PPR_TM(r9) |
| 1407 | std r30, VCPU_DSCR_TM(r9) |
| 1408 | mflr r5 |
| 1409 | mfcr r6 |
| 1410 | mfctr r7 |
| 1411 | mfspr r8, SPRN_AMR |
| 1412 | mfspr r10, SPRN_TAR |
| 1413 | std r5, VCPU_LR_TM(r9) |
| 1414 | stw r6, VCPU_CR_TM(r9) |
| 1415 | std r7, VCPU_CTR_TM(r9) |
| 1416 | std r8, VCPU_AMR_TM(r9) |
| 1417 | std r10, VCPU_TAR_TM(r9) |
| 1418 | |
| 1419 | /* Restore r12 as trap number. */ |
| 1420 | lwz r12, VCPU_TRAP(r9) |
| 1421 | |
| 1422 | /* Save FP/VSX. */ |
| 1423 | addi r3, r9, VCPU_FPRS_TM |
Alexander Graf | 9bf163f | 2014-06-16 14:41:15 +0200 | [diff] [blame] | 1424 | bl store_fp_state |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1425 | addi r3, r9, VCPU_VRS_TM |
Alexander Graf | 9bf163f | 2014-06-16 14:41:15 +0200 | [diff] [blame] | 1426 | bl store_vr_state |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1427 | mfspr r6, SPRN_VRSAVE |
| 1428 | stw r6, VCPU_VRSAVE_TM(r9) |
| 1429 | 1: |
| 1430 | /* |
| 1431 | * We need to save these SPRs after the treclaim so that the software |
| 1432 | * error code is recorded correctly in the TEXASR. Also the user may |
| 1433 | * change these outside of a transaction, so they must always be |
| 1434 | * context switched. |
| 1435 | */ |
| 1436 | mfspr r5, SPRN_TFHAR |
| 1437 | mfspr r6, SPRN_TFIAR |
| 1438 | mfspr r7, SPRN_TEXASR |
| 1439 | std r5, VCPU_TFHAR(r9) |
| 1440 | std r6, VCPU_TFIAR(r9) |
| 1441 | std r7, VCPU_TEXASR(r9) |
| 1442 | 2: |
| 1443 | #endif |
| 1444 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1445 | /* Increment yield count if they have a VPA */ |
| 1446 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ |
| 1447 | cmpdi r8, 0 |
| 1448 | beq 25f |
Alexander Graf | 0865a58 | 2014-06-11 10:36:17 +0200 | [diff] [blame] | 1449 | li r4, LPPACA_YIELDCOUNT |
| 1450 | LWZX_BE r3, r8, r4 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1451 | addi r3, r3, 1 |
Alexander Graf | 0865a58 | 2014-06-11 10:36:17 +0200 | [diff] [blame] | 1452 | STWX_BE r3, r8, r4 |
Paul Mackerras | c35635e | 2013-04-18 19:51:04 +0000 | [diff] [blame] | 1453 | li r3, 1 |
| 1454 | stb r3, VCPU_VPA_DIRTY(r9) |
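|      | /*
|      | * Note: the VPA is a PAPR structure whose fields are stored
|      | * big-endian, so LWZX_BE/STWX_BE byte-reverse the access on
|      | * little-endian hosts and are plain loads/stores on big-endian.
|      | */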
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1455 | 25: |
| 1456 | /* Save PMU registers if requested */ |
| 1457 | /* r8 and cr0.eq are live here */ |
Paul Mackerras | 9bc01a9 | 2014-05-26 19:48:40 +1000 | [diff] [blame] | 1458 | BEGIN_FTR_SECTION |
| 1459 | /* |
| 1460 | * POWER8 appears to have a hardware bug whereby setting
| 1461 | * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
| 1462 | * when some counters are already negative fails to cause
| 1463 | * a performance monitor alert (and hence interrupt).
| 1464 | * The effect of this is that when saving the PMU state,
| 1465 | * if there is no PMU alert pending when we read MMCR0
| 1466 | * before freezing the counters, but one becomes pending
| 1467 | * before we read the counters, we lose it.
| 1468 | * To work around this, we need a way to freeze the counters
| 1469 | * before reading MMCR0. Normally, freezing the counters
| 1470 | * is done by writing MMCR0 (to set MMCR0[FC]), which
| 1471 | * unavoidably writes MMCR0[PMAO] as well. On POWER8,
| 1472 | * we can also freeze the counters using MMCR2, by writing
| 1473 | * 1s to all the counter freeze condition bits (there are
| 1474 | * 9 bits each for 6 counters).
| 1475 | */ |
| 1476 | li r3, -1 /* set all freeze bits */ |
| 1477 | clrrdi r3, r3, 10 |
| 1478 | mfspr r10, SPRN_MMCR2 |
| 1479 | mtspr SPRN_MMCR2, r3 |
| 1480 | isync |
| 1481 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
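|      | /*
|      | * Arithmetic check for the value written to MMCR2 above:
|      | * li r3,-1 followed by clrrdi r3,r3,10 gives
|      | * 0xffffffff_fffffc00, i.e. the top 54 bits set -- exactly
|      | * 6 counters x 9 freeze condition bits -- with the low 10
|      | * (non-freeze) bits clear.
|      | */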
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1482 | li r3, 1 |
| 1483 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
| 1484 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ |
| 1485 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 1486 | mfspr r6, SPRN_MMCRA |
| 1487 | BEGIN_FTR_SECTION |
| 1488 | /* On P7, clear MMCRA in order to disable SDAR updates */ |
| 1489 | li r7, 0 |
| 1490 | mtspr SPRN_MMCRA, r7 |
| 1491 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1492 | isync |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1493 | beq 21f /* if no VPA, save PMU stuff anyway */ |
| 1494 | lbz r7, LPPACA_PMCINUSE(r8) |
| 1495 | cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ |
| 1496 | bne 21f |
| 1497 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ |
| 1498 | b 22f |
| 1499 | 21: mfspr r5, SPRN_MMCR1 |
Paul Mackerras | 1494178 | 2013-09-06 13:11:18 +1000 | [diff] [blame] | 1500 | mfspr r7, SPRN_SIAR |
| 1501 | mfspr r8, SPRN_SDAR |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1502 | std r4, VCPU_MMCR(r9) |
| 1503 | std r5, VCPU_MMCR + 8(r9) |
| 1504 | std r6, VCPU_MMCR + 16(r9) |
Paul Mackerras | 9bc01a9 | 2014-05-26 19:48:40 +1000 | [diff] [blame] | 1505 | BEGIN_FTR_SECTION |
| 1506 | std r10, VCPU_MMCR + 24(r9) |
| 1507 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | 1494178 | 2013-09-06 13:11:18 +1000 | [diff] [blame] | 1508 | std r7, VCPU_SIAR(r9) |
| 1509 | std r8, VCPU_SDAR(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1510 | mfspr r3, SPRN_PMC1 |
| 1511 | mfspr r4, SPRN_PMC2 |
| 1512 | mfspr r5, SPRN_PMC3 |
| 1513 | mfspr r6, SPRN_PMC4 |
| 1514 | mfspr r7, SPRN_PMC5 |
| 1515 | mfspr r8, SPRN_PMC6 |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1516 | BEGIN_FTR_SECTION |
| 1517 | mfspr r10, SPRN_PMC7 |
| 1518 | mfspr r11, SPRN_PMC8 |
| 1519 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1520 | stw r3, VCPU_PMC(r9) |
| 1521 | stw r4, VCPU_PMC + 4(r9) |
| 1522 | stw r5, VCPU_PMC + 8(r9) |
| 1523 | stw r6, VCPU_PMC + 12(r9) |
| 1524 | stw r7, VCPU_PMC + 16(r9) |
| 1525 | stw r8, VCPU_PMC + 20(r9) |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1526 | BEGIN_FTR_SECTION |
| 1527 | stw r10, VCPU_PMC + 24(r9) |
| 1528 | stw r11, VCPU_PMC + 28(r9) |
| 1529 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1530 | BEGIN_FTR_SECTION |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1531 | mfspr r5, SPRN_SIER |
| 1532 | mfspr r6, SPRN_SPMC1 |
| 1533 | mfspr r7, SPRN_SPMC2 |
| 1534 | mfspr r8, SPRN_MMCRS |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1535 | std r5, VCPU_SIER(r9) |
| 1536 | stw r6, VCPU_PMC + 24(r9) |
| 1537 | stw r7, VCPU_PMC + 28(r9) |
| 1538 | std r8, VCPU_MMCR + 32(r9) |
| 1539 | lis r4, 0x8000 |
| 1540 | mtspr SPRN_MMCRS, r4 |
| 1541 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1542 | 22: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1543 | /* Clear out SLB */ |
| 1544 | li r5,0 |
| 1545 | slbmte r5,r5 |
| 1546 | slbia |
| 1547 | ptesync |
| 1548 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1549 | hdec_soon: /* r12 = trap, r13 = paca */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1550 | BEGIN_FTR_SECTION |
| 1551 | b 32f |
| 1552 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
| 1553 | /* |
| 1554 | * POWER7 guest -> host partition switch code. |
| 1555 | * We don't have to lock against tlbies but we do |
| 1556 | * have to coordinate the hardware threads. |
| 1557 | */ |
| 1558 | /* Increment the threads-exiting-guest count in the 0xff00 |
| 1559 | bits of vcore->entry_exit_count */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1560 | ld r5,HSTATE_KVM_VCORE(r13) |
| 1561 | addi r6,r5,VCORE_ENTRY_EXIT |
| 1562 | 41: lwarx r3,0,r6 |
| 1563 | addi r0,r3,0x100 |
| 1564 | stwcx. r0,0,r6 |
| 1565 | bne 41b |
Paul Mackerras | f019b7a | 2013-11-16 17:46:03 +1100 | [diff] [blame] | 1566 | isync /* order stwcx. vs. reading napping_threads */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1567 | |
| 1568 | /* |
| 1569 | * At this point we have an interrupt that we have to pass |
| 1570 | * up to the kernel or qemu; we can't handle it in real mode. |
| 1571 | * Thus we have to do a partition switch, so we have to |
| 1572 | * collect the other threads, if we are the first thread |
| 1573 | * to take an interrupt. To do this, we set the HDEC to 0, |
| 1574 | * which causes an HDEC interrupt in all threads within 2ns |
| 1575 | * because the HDEC register is shared between all 4 threads. |
| 1576 | * However, we don't need to bother if this is an HDEC |
| 1577 | * interrupt, since the other threads will already be on their |
| 1578 | * way here in that case. |
| 1579 | */ |
| 1580 | cmpwi r3,0x100 /* Are we the first here? */ |
| 1581 | bge 43f |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1582 | cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER |
| 1583 | beq 40f |
| 1584 | li r0,0 |
| 1585 | mtspr SPRN_HDEC,r0 |
| 1586 | 40: |
| 1587 | /* |
| 1588 | * Send an IPI to any napping threads, since an HDEC interrupt |
| 1589 | * doesn't wake CPUs up from nap. |
| 1590 | */ |
| 1591 | lwz r3,VCORE_NAPPING_THREADS(r5) |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1592 | lbz r4,HSTATE_PTID(r13) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1593 | li r0,1 |
| 1594 | sld r0,r0,r4 |
| 1595 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ |
| 1596 | beq 43f |
Paul Mackerras | f019b7a | 2013-11-16 17:46:03 +1100 | [diff] [blame] | 1597 | /* Order entry/exit update vs. IPIs */ |
| 1598 | sync |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1599 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ |
| 1600 | subf r6,r4,r13 |
| 1601 | 42: andi. r0,r3,1 |
| 1602 | beq 44f |
| 1603 | ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ |
| 1604 | li r0,IPI_PRIORITY |
| 1605 | li r7,XICS_MFRR |
| 1606 | stbcix r0,r7,r8 /* trigger the IPI */ |
| 1607 | 44: srdi. r3,r3,1 |
| 1608 | addi r6,r6,PACA_SIZE |
| 1609 | bne 42b |
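|      | /*
|      | * The 42:/44: loop above is, in rough C (treat this as a sketch;
|      | * the stbcix is a cache-inhibited byte store to each napping
|      | * thread's XICS MFRR):
|      | *
|      | *   for (paca = paca_of_thread0; mask; paca++, mask >>= 1)
|      | *       if (mask & 1)
|      | *           write8(paca->kvm_hstate.xics_phys + XICS_MFRR,
|      | *                  IPI_PRIORITY);
|      | */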
| 1610 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1611 | secondary_too_late: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1612 | /* Secondary threads wait for primary to do partition switch */ |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1613 | 43: ld r5,HSTATE_KVM_VCORE(r13) |
| 1614 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ |
| 1615 | lbz r3,HSTATE_PTID(r13) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1616 | cmpwi r3,0 |
| 1617 | beq 15f |
| 1618 | HMT_LOW |
| 1619 | 13: lbz r3,VCORE_IN_GUEST(r5) |
| 1620 | cmpwi r3,0 |
| 1621 | bne 13b |
| 1622 | HMT_MEDIUM |
| 1623 | b 16f |
| 1624 | |
| 1625 | /* Primary thread waits for all the secondaries to exit guest */ |
| 1626 | 15: lwz r3,VCORE_ENTRY_EXIT(r5) |
| 1627 | srwi r0,r3,8 |
| 1628 | clrldi r3,r3,56 |
| 1629 | cmpw r3,r0 |
| 1630 | bne 15b |
| 1631 | isync |
| 1632 | |
| 1633 | /* Primary thread switches back to host partition */ |
| 1634 | ld r6,KVM_HOST_SDR1(r4) |
| 1635 | lwz r7,KVM_HOST_LPID(r4) |
| 1636 | li r8,LPID_RSVD /* switch to reserved LPID */ |
| 1637 | mtspr SPRN_LPID,r8 |
| 1638 | ptesync |
| 1639 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ |
| 1640 | mtspr SPRN_LPID,r7 |
| 1641 | isync |
| 1642 | |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1643 | BEGIN_FTR_SECTION |
| 1644 | /* DPDES is shared between threads */ |
| 1645 | mfspr r7, SPRN_DPDES |
| 1646 | std r7, VCORE_DPDES(r5) |
| 1647 | /* clear DPDES so we don't get guest doorbells in the host */ |
| 1648 | li r8, 0 |
| 1649 | mtspr SPRN_DPDES, r8 |
| 1650 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
| 1651 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1652 | /* Subtract timebase offset from timebase */ |
| 1653 | ld r8,VCORE_TB_OFFSET(r5) |
| 1654 | cmpdi r8,0 |
| 1655 | beq 17f |
Paul Mackerras | c5fb80d | 2014-03-25 10:47:07 +1100 | [diff] [blame] | 1656 | mftb r6 /* current guest timebase */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1657 | subf r8,r8,r6 |
| 1658 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ |
| 1659 | mftb r7 /* check if lower 24 bits overflowed */ |
| 1660 | clrldi r6,r6,40 |
| 1661 | clrldi r7,r7,40 |
| 1662 | cmpld r7,r6 |
| 1663 | bge 17f |
| 1664 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ |
| 1665 | mtspr SPRN_TBU40,r8 |
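|      | /*
|      | * TBU40 only sets the upper 40 bits of the timebase, so the low
|      | * 24 bits keep running.  If the low 24 bits carried (wrapped)
|      | * between the two mftb reads above, the upper field we wrote is
|      | * one too small; addis r8,r8,0x100 adds 1 << 24, incrementing
|      | * the 40-bit upper field by 1 before it is rewritten.
|      | */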
| 1666 | |
| 1667 | /* Reset PCR */ |
| 1668 | 17: ld r0, VCORE_PCR(r5) |
| 1669 | cmpdi r0, 0 |
| 1670 | beq 18f |
| 1671 | li r0, 0 |
| 1672 | mtspr SPRN_PCR, r0 |
| 1673 | 18: |
| 1674 | /* Signal secondary CPUs to continue */ |
| 1675 | stb r0,VCORE_IN_GUEST(r5) |
| 1676 | lis r8,0x7fff /* MAX_INT@h */ |
| 1677 | mtspr SPRN_HDEC,r8 |
| 1678 | |
| 1679 | 16: ld r8,KVM_HOST_LPCR(r4) |
| 1680 | mtspr SPRN_LPCR,r8 |
| 1681 | isync |
| 1682 | b 33f |
| 1683 | |
| 1684 | /* |
| 1685 | * PPC970 guest -> host partition switch code. |
| 1686 | * We have to lock against concurrent tlbies, and |
| 1687 | * we have to flush the whole TLB. |
| 1688 | */ |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1689 | 32: ld r5,HSTATE_KVM_VCORE(r13) |
| 1690 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1691 | |
| 1692 | /* Take the guest's tlbie_lock */ |
| 1693 | #ifdef __BIG_ENDIAN__ |
| 1694 | lwz r8,PACA_LOCK_TOKEN(r13) |
| 1695 | #else |
| 1696 | lwz r8,PACAPACAINDEX(r13) |
| 1697 | #endif |
| 1698 | addi r3,r4,KVM_TLBIE_LOCK |
| 1699 | 24: lwarx r0,0,r3 |
| 1700 | cmpwi r0,0 |
| 1701 | bne 24b |
| 1702 | stwcx. r8,0,r3 |
| 1703 | bne 24b |
| 1704 | isync |
| 1705 | |
| 1706 | ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */ |
| 1707 | li r0,0x18f |
| 1708 | rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ |
| 1709 | or r0,r7,r0 |
| 1710 | ptesync |
| 1711 | sync |
| 1712 | mtspr SPRN_HID4,r0 /* switch to reserved LPID */ |
| 1713 | isync |
| 1714 | li r0,0 |
| 1715 | stw r0,0(r3) /* drop guest tlbie_lock */ |
| 1716 | |
| 1717 | /* invalidate the whole TLB */ |
| 1718 | li r0,256 |
| 1719 | mtctr r0 |
| 1720 | li r6,0 |
| 1721 | 25: tlbiel r6 |
| 1722 | addi r6,r6,0x1000 |
| 1723 | bdnz 25b |
| 1724 | ptesync |
| 1725 | |
| 1726 | /* take native_tlbie_lock */ |
| 1727 | ld r3,toc_tlbie_lock@toc(2) |
| 1728 | 24: lwarx r0,0,r3 |
| 1729 | cmpwi r0,0 |
| 1730 | bne 24b |
| 1731 | stwcx. r8,0,r3 |
| 1732 | bne 24b |
| 1733 | isync |
| 1734 | |
| 1735 | ld r6,KVM_HOST_SDR1(r4) |
| 1736 | mtspr SPRN_SDR1,r6 /* switch to host page table */ |
| 1737 | |
| 1738 | /* Set up host HID4 value */ |
| 1739 | sync |
| 1740 | mtspr SPRN_HID4,r7 |
| 1741 | isync |
| 1742 | li r0,0 |
| 1743 | stw r0,0(r3) /* drop native_tlbie_lock */ |
| 1744 | |
| 1745 | lis r8,0x7fff /* MAX_INT@h */ |
| 1746 | mtspr SPRN_HDEC,r8 |
| 1747 | |
| 1748 | /* Disable HDEC interrupts */ |
| 1749 | mfspr r0,SPRN_HID0 |
| 1750 | li r3,0 |
| 1751 | rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 |
| 1752 | sync |
| 1753 | mtspr SPRN_HID0,r0 |
| 1754 | mfspr r0,SPRN_HID0 |
| 1755 | mfspr r0,SPRN_HID0 |
| 1756 | mfspr r0,SPRN_HID0 |
| 1757 | mfspr r0,SPRN_HID0 |
| 1758 | mfspr r0,SPRN_HID0 |
| 1759 | mfspr r0,SPRN_HID0 |
| 1760 | |
| 1761 | /* load host SLB entries */ |
| 1762 | 33: ld r8,PACA_SLBSHADOWPTR(r13) |
| 1763 | |
| 1764 | .rept SLB_NUM_BOLTED |
Alexander Graf | 0865a58 | 2014-06-11 10:36:17 +0200 | [diff] [blame] | 1765 | li r3, SLBSHADOW_SAVEAREA |
| 1766 | LDX_BE r5, r8, r3 |
| 1767 | addi r3, r3, 8 |
| 1768 | LDX_BE r6, r8, r3 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1769 | andis. r7,r5,SLB_ESID_V@h |
| 1770 | beq 1f |
| 1771 | slbmte r6,r5 |
| 1772 | 1: addi r8,r8,16 |
| 1773 | .endr |
| 1774 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1775 | /* Unset guest mode */ |
| 1776 | li r0, KVM_GUEST_MODE_NONE |
| 1777 | stb r0, HSTATE_IN_GUEST(r13) |
| 1778 | |
Paul Mackerras | 218309b | 2013-09-06 13:23:44 +1000 | [diff] [blame] | 1779 | ld r0, 112+PPC_LR_STKOFF(r1) |
| 1780 | addi r1, r1, 112 |
| 1781 | mtlr r0 |
| 1782 | blr |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1783 | |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1784 | /* |
| 1785 | * Check whether an HDSI is an HPTE not found fault or something else. |
| 1786 | * If it is an HPTE not found fault that is due to the guest accessing |
| 1787 | * a page that it has mapped but which we have paged out, then
| 1788 | * we continue on with the guest exit path. In all other cases, |
| 1789 | * reflect the HDSI to the guest as a DSI. |
| 1790 | */ |
| 1791 | kvmppc_hdsi: |
| 1792 | mfspr r4, SPRN_HDAR |
| 1793 | mfspr r6, SPRN_HDSISR |
Paul Mackerras | 4cf302b | 2011-12-12 12:38:51 +0000 | [diff] [blame] | 1794 | /* HPTE not found fault or protection fault? */ |
| 1795 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1796 | beq 1f /* if not, send it to the guest */ |
| 1797 | andi. r0, r11, MSR_DR /* data relocation enabled? */ |
| 1798 | beq 3f |
| 1799 | clrrdi r0, r4, 28 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1800 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1801 | bne 1f /* if no SLB entry found */ |
| 1802 | 4: std r4, VCPU_FAULT_DAR(r9) |
| 1803 | stw r6, VCPU_FAULT_DSISR(r9) |
| 1804 | |
| 1805 | /* Search the hash table. */ |
| 1806 | mr r3, r9 /* vcpu pointer */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1807 | li r7, 1 /* data fault */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 1808 | bl kvmppc_hpte_hv_fault |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1809 | ld r9, HSTATE_KVM_VCPU(r13) |
| 1810 | ld r10, VCPU_PC(r9) |
| 1811 | ld r11, VCPU_MSR(r9) |
| 1812 | li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE |
| 1813 | cmpdi r3, 0 /* retry the instruction */ |
| 1814 | beq 6f |
| 1815 | cmpdi r3, -1 /* handle in kernel mode */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1816 | beq guest_exit_cont |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1817 | cmpdi r3, -2 /* MMIO emulation; need instr word */ |
| 1818 | beq 2f |
| 1819 | |
| 1820 | /* Synthesize a DSI for the guest */ |
| 1821 | ld r4, VCPU_FAULT_DAR(r9) |
| 1822 | mr r6, r3 |
| 1823 | 1: mtspr SPRN_DAR, r4 |
| 1824 | mtspr SPRN_DSISR, r6 |
| 1825 | mtspr SPRN_SRR0, r10 |
| 1826 | mtspr SPRN_SRR1, r11 |
| 1827 | li r10, BOOK3S_INTERRUPT_DATA_STORAGE |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 1828 | bl kvmppc_msr_interrupt |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1829 | fast_interrupt_c_return: |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1830 | 6: ld r7, VCPU_CTR(r9) |
| 1831 | lwz r8, VCPU_XER(r9) |
| 1832 | mtctr r7 |
| 1833 | mtxer r8 |
| 1834 | mr r4, r9 |
| 1835 | b fast_guest_return |
| 1836 | |
| 1837 | 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ |
| 1838 | ld r5, KVM_VRMA_SLB_V(r5) |
| 1839 | b 4b |
| 1840 | |
| 1841 | /* If this is for emulated MMIO, load the instruction word */ |
| 1842 | 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ |
| 1843 | |
| 1844 | /* Set guest mode to 'jump over instruction' so if lwz faults |
| 1845 | * we'll just continue at the next IP. */ |
| 1846 | li r0, KVM_GUEST_MODE_SKIP |
| 1847 | stb r0, HSTATE_IN_GUEST(r13) |
| 1848 | |
| 1849 | /* Do the access with MSR:DR enabled */ |
| 1850 | mfmsr r3 |
| 1851 | ori r4, r3, MSR_DR /* Enable paging for data */ |
| 1852 | mtmsrd r4 |
| 1853 | lwz r8, 0(r10) |
| 1854 | mtmsrd r3 |
| 1855 | |
| 1856 | /* Store the result */ |
| 1857 | stw r8, VCPU_LAST_INST(r9) |
| 1858 | |
| 1859 | /* Unset guest mode. */ |
Paul Mackerras | 44a3add | 2013-10-04 21:45:04 +1000 | [diff] [blame] | 1860 | li r0, KVM_GUEST_MODE_HOST_HV |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1861 | stb r0, HSTATE_IN_GUEST(r13) |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1862 | b guest_exit_cont |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1863 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1864 | /* |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1865 | * Similarly for an HISI, reflect it to the guest as an ISI unless |
| 1866 | * it is an HPTE not found fault for a page that we have paged out. |
| 1867 | */ |
| 1868 | kvmppc_hisi: |
| 1869 | andis. r0, r11, SRR1_ISI_NOPT@h |
| 1870 | beq 1f |
| 1871 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ |
| 1872 | beq 3f |
| 1873 | clrrdi r0, r10, 28 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1874 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1875 | bne 1f /* if no SLB entry found */ |
| 1876 | 4: |
| 1877 | /* Search the hash table. */ |
| 1878 | mr r3, r9 /* vcpu pointer */ |
| 1879 | mr r4, r10 |
| 1880 | mr r6, r11 |
| 1881 | li r7, 0 /* instruction fault */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 1882 | bl kvmppc_hpte_hv_fault |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1883 | ld r9, HSTATE_KVM_VCPU(r13) |
| 1884 | ld r10, VCPU_PC(r9) |
| 1885 | ld r11, VCPU_MSR(r9) |
| 1886 | li r12, BOOK3S_INTERRUPT_H_INST_STORAGE |
| 1887 | cmpdi r3, 0 /* retry the instruction */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1888 | beq fast_interrupt_c_return |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1889 | cmpdi r3, -1 /* handle in kernel mode */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1890 | beq guest_exit_cont |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1891 | |
| 1892 | /* Synthesize an ISI for the guest */ |
| 1893 | mr r11, r3 |
| 1894 | 1: mtspr SPRN_SRR0, r10 |
| 1895 | mtspr SPRN_SRR1, r11 |
| 1896 | li r10, BOOK3S_INTERRUPT_INST_STORAGE |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 1897 | bl kvmppc_msr_interrupt |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1898 | b fast_interrupt_c_return |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1899 | |
| 1900 | 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ |
| 1901 | ld r5, KVM_VRMA_SLB_V(r6) |
| 1902 | b 4b |
| 1903 | |
| 1904 | /* |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1905 | * Try to handle an hcall in real mode. |
| 1906 | * Returns to the guest if we handle it, or continues on up to |
| 1907 | * the kernel if we can't (i.e. if we don't have a handler for |
| 1908 | * it, or if the handler returns H_TOO_HARD). |
| 1909 | */ |
| 1910 | .globl hcall_try_real_mode |
| 1911 | hcall_try_real_mode: |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1912 | ld r3,VCPU_GPR(R3)(r9) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1913 | andi. r0,r11,MSR_PR |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 1914 | /* sc 1 from userspace - reflect to guest syscall */ |
| 1915 | bne sc_1_fast_return |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1916 | clrrdi r3,r3,2 |
| 1917 | cmpldi r3,hcall_real_table_end - hcall_real_table |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1918 | bge guest_exit_cont |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 1919 | /* See if this hcall is enabled for in-kernel handling */ |
| 1920 | ld r4, VCPU_KVM(r9) |
| 1921 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ |
| 1922 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ |
| 1923 | add r4, r4, r0 |
| 1924 | ld r0, KVM_ENABLED_HCALLS(r4) |
| 1925 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ |
| 1926 | srd r0, r0, r4 |
| 1927 | andi. r0, r0, 1 |
| 1928 | beq guest_exit_cont |
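|      | /*
|      | * Worked example of the bitmap lookup above, for H_CEDE (0xe0):
|      | * hcall index = 0xe0 / 4 = 56, so word = 56 >> 6 = 0 and
|      | * bit = 56 & 0x3f = 56; the hcall is handled in-kernel only if
|      | * bit 56 of kvm->arch.enabled_hcalls[0] is set.
|      | */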
| 1929 | /* Get pointer to handler, if any, and call it */ |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1930 | LOAD_REG_ADDR(r4, hcall_real_table) |
Paul Mackerras | 4baa1d8 | 2013-07-08 20:09:53 +1000 | [diff] [blame] | 1931 | lwax r3,r3,r4 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1932 | cmpwi r3,0 |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1933 | beq guest_exit_cont |
Anton Blanchard | 05a308c | 2014-06-12 18:16:10 +1000 | [diff] [blame] | 1934 | add r12,r3,r4 |
| 1935 | mtctr r12 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1936 | mr r3,r9 /* get vcpu pointer */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1937 | ld r4,VCPU_GPR(R4)(r9) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1938 | bctrl |
| 1939 | cmpdi r3,H_TOO_HARD |
| 1940 | beq hcall_real_fallback |
| 1941 | ld r4,HSTATE_KVM_VCPU(r13) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1942 | std r3,VCPU_GPR(R3)(r4) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1943 | ld r10,VCPU_PC(r4) |
| 1944 | ld r11,VCPU_MSR(r4) |
| 1945 | b fast_guest_return |
| 1946 | |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 1947 | sc_1_fast_return: |
| 1948 | mtspr SPRN_SRR0,r10 |
| 1949 | mtspr SPRN_SRR1,r11 |
| 1950 | li r10, BOOK3S_INTERRUPT_SYSCALL |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 1951 | bl kvmppc_msr_interrupt |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 1952 | mr r4,r9 |
| 1953 | b fast_guest_return |
| 1954 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1955 | /* We've attempted a real mode hcall, but it has been punted back
| 1956 | * to userspace. We need to restore some clobbered volatiles |
| 1957 | * before resuming the pass-it-to-qemu path */ |
| 1958 | hcall_real_fallback: |
| 1959 | li r12,BOOK3S_INTERRUPT_SYSCALL |
| 1960 | ld r9, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1961 | |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1962 | b guest_exit_cont |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1963 | |
| 1964 | .globl hcall_real_table |
| 1965 | hcall_real_table: |
| 1966 | .long 0 /* 0 - unused */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 1967 | .long DOTSYM(kvmppc_h_remove) - hcall_real_table |
| 1968 | .long DOTSYM(kvmppc_h_enter) - hcall_real_table |
| 1969 | .long DOTSYM(kvmppc_h_read) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1970 | .long 0 /* 0x10 - H_CLEAR_MOD */ |
| 1971 | .long 0 /* 0x14 - H_CLEAR_REF */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 1972 | .long DOTSYM(kvmppc_h_protect) - hcall_real_table |
| 1973 | .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table |
| 1974 | .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1975 | .long 0 /* 0x24 - H_SET_SPRG0 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 1976 | .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1977 | .long 0 /* 0x2c */ |
| 1978 | .long 0 /* 0x30 */ |
| 1979 | .long 0 /* 0x34 */ |
| 1980 | .long 0 /* 0x38 */ |
| 1981 | .long 0 /* 0x3c */ |
| 1982 | .long 0 /* 0x40 */ |
| 1983 | .long 0 /* 0x44 */ |
| 1984 | .long 0 /* 0x48 */ |
| 1985 | .long 0 /* 0x4c */ |
| 1986 | .long 0 /* 0x50 */ |
| 1987 | .long 0 /* 0x54 */ |
| 1988 | .long 0 /* 0x58 */ |
| 1989 | .long 0 /* 0x5c */ |
| 1990 | .long 0 /* 0x60 */ |
Benjamin Herrenschmidt | e7d26f2 | 2013-04-17 20:31:15 +0000 | [diff] [blame] | 1991 | #ifdef CONFIG_KVM_XICS |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 1992 | .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table |
| 1993 | .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table |
| 1994 | .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table |
Benjamin Herrenschmidt | e7d26f2 | 2013-04-17 20:31:15 +0000 | [diff] [blame] | 1995 | .long 0 /* 0x70 - H_IPOLL */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 1996 | .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table |
Benjamin Herrenschmidt | e7d26f2 | 2013-04-17 20:31:15 +0000 | [diff] [blame] | 1997 | #else |
| 1998 | .long 0 /* 0x64 - H_EOI */ |
| 1999 | .long 0 /* 0x68 - H_CPPR */ |
| 2000 | .long 0 /* 0x6c - H_IPI */ |
| 2001 | .long 0 /* 0x70 - H_IPOLL */ |
| 2002 | .long 0 /* 0x74 - H_XIRR */ |
| 2003 | #endif |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2004 | .long 0 /* 0x78 */ |
| 2005 | .long 0 /* 0x7c */ |
| 2006 | .long 0 /* 0x80 */ |
| 2007 | .long 0 /* 0x84 */ |
| 2008 | .long 0 /* 0x88 */ |
| 2009 | .long 0 /* 0x8c */ |
| 2010 | .long 0 /* 0x90 */ |
| 2011 | .long 0 /* 0x94 */ |
| 2012 | .long 0 /* 0x98 */ |
| 2013 | .long 0 /* 0x9c */ |
| 2014 | .long 0 /* 0xa0 */ |
| 2015 | .long 0 /* 0xa4 */ |
| 2016 | .long 0 /* 0xa8 */ |
| 2017 | .long 0 /* 0xac */ |
| 2018 | .long 0 /* 0xb0 */ |
| 2019 | .long 0 /* 0xb4 */ |
| 2020 | .long 0 /* 0xb8 */ |
| 2021 | .long 0 /* 0xbc */ |
| 2022 | .long 0 /* 0xc0 */ |
| 2023 | .long 0 /* 0xc4 */ |
| 2024 | .long 0 /* 0xc8 */ |
| 2025 | .long 0 /* 0xcc */ |
| 2026 | .long 0 /* 0xd0 */ |
| 2027 | .long 0 /* 0xd4 */ |
| 2028 | .long 0 /* 0xd8 */ |
| 2029 | .long 0 /* 0xdc */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2030 | .long DOTSYM(kvmppc_h_cede) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2031 | .long 0 /* 0xe4 */ |
| 2032 | .long 0 /* 0xe8 */ |
| 2033 | .long 0 /* 0xec */ |
| 2034 | .long 0 /* 0xf0 */ |
| 2035 | .long 0 /* 0xf4 */ |
| 2036 | .long 0 /* 0xf8 */ |
| 2037 | .long 0 /* 0xfc */ |
| 2038 | .long 0 /* 0x100 */ |
| 2039 | .long 0 /* 0x104 */ |
| 2040 | .long 0 /* 0x108 */ |
| 2041 | .long 0 /* 0x10c */ |
| 2042 | .long 0 /* 0x110 */ |
| 2043 | .long 0 /* 0x114 */ |
| 2044 | .long 0 /* 0x118 */ |
| 2045 | .long 0 /* 0x11c */ |
| 2046 | .long 0 /* 0x120 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2047 | .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2048 | .long 0 /* 0x128 */ |
| 2049 | .long 0 /* 0x12c */ |
| 2050 | .long 0 /* 0x130 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2051 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2052 | .globl hcall_real_table_end |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2053 | hcall_real_table_end: |
| 2054 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2055 | ignore_hdec: |
| 2056 | mr r4,r9 |
| 2057 | b fast_guest_return |
| 2058 | |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2059 | _GLOBAL(kvmppc_h_set_xdabr) |
| 2060 | andi. r0, r5, DABRX_USER | DABRX_KERNEL |
| 2061 | beq 6f |
| 2062 | li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI |
| 2063 | andc. r0, r5, r0 |
| 2064 | beq 3f |
| 2065 | 6: li r3, H_PARAMETER |
| 2066 | blr |
| 2067 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2068 | _GLOBAL(kvmppc_h_set_dabr) |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2069 | li r5, DABRX_USER | DABRX_KERNEL |
| 2070 | 3: |
Michael Neuling | eee7ff9 | 2014-01-08 21:25:19 +1100 | [diff] [blame] | 2071 | BEGIN_FTR_SECTION |
| 2072 | b 2f |
| 2073 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2074 | std r4,VCPU_DABR(r3) |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2075 | stw r5, VCPU_DABRX(r3) |
| 2076 | mtspr SPRN_DABRX, r5 |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 2077 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
| 2078 | 1: mtspr SPRN_DABR,r4 |
| 2079 | mfspr r5, SPRN_DABR |
| 2080 | cmpd r4, r5 |
| 2081 | bne 1b |
| 2082 | isync |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2083 | li r3,0 |
| 2084 | blr |
| 2085 | |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2086 | /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ |
| 2087 | 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW |
| 2088 | rlwimi r5, r4, 1, DAWRX_WT |
| 2089 | clrrdi r4, r4, 3 |
| 2090 | std r4, VCPU_DAWR(r3) |
| 2091 | std r5, VCPU_DAWRX(r3) |
| 2092 | mtspr SPRN_DAWR, r4 |
| 2093 | mtspr SPRN_DAWRX, r5 |
| 2094 | li r3, 0 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2095 | blr |
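|      | /*
|      | * Sketch of the repacking above (exact bit positions hedged --
|      | * see the DAWRX_* definitions): the two rlwimi instructions move
|      | * the DABR's read/write/translation control bits into their
|      | * DAWRX positions, and clrrdi strips the low 3 control bits from
|      | * r4 so only the doubleword-aligned address is written to DAWR.
|      | */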
| 2096 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2097 | _GLOBAL(kvmppc_h_cede) |
| 2098 | ori r11,r11,MSR_EE |
| 2099 | std r11,VCPU_MSR(r3) |
| 2100 | li r0,1 |
| 2101 | stb r0,VCPU_CEDED(r3) |
| 2102 | sync /* order setting ceded vs. testing prodded */ |
| 2103 | lbz r5,VCPU_PRODDED(r3) |
| 2104 | cmpwi r5,0 |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2105 | bne kvm_cede_prodded |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2106 | li r0,0 /* set trap to 0 to say hcall is handled */ |
| 2107 | stw r0,VCPU_TRAP(r3) |
| 2108 | li r0,H_SUCCESS |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2109 | std r0,VCPU_GPR(R3)(r3) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2110 | BEGIN_FTR_SECTION |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2111 | b kvm_cede_exit /* just send it up to host on 970 */ |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2112 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) |
| 2113 | |
| 2114 | /* |
| 2115 | * Set our bit in the bitmask of napping threads unless all the |
| 2116 | * other threads are already napping, in which case we send this |
| 2117 | * up to the host. |
| 2118 | */ |
| 2119 | ld r5,HSTATE_KVM_VCORE(r13) |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 2120 | lbz r6,HSTATE_PTID(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2121 | lwz r8,VCORE_ENTRY_EXIT(r5) |
| 2122 | clrldi r8,r8,56 |
| 2123 | li r0,1 |
| 2124 | sld r0,r0,r6 |
| 2125 | addi r6,r5,VCORE_NAPPING_THREADS |
| 2126 | 31: lwarx r4,0,r6 |
| 2127 | or r4,r4,r0 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2128 | PPC_POPCNTW(R7,R4) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2129 | cmpw r7,r8 |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2130 | bge kvm_cede_exit |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2131 | stwcx. r4,0,r6 |
| 2132 | bne 31b |
Paul Mackerras | f019b7a | 2013-11-16 17:46:03 +1100 | [diff] [blame] | 2133 | /* order napping_threads update vs testing entry_exit_count */ |
| 2134 | isync |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 2135 | li r0,NAPPING_CEDE |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2136 | stb r0,HSTATE_NAPPING(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2137 | lwz r7,VCORE_ENTRY_EXIT(r5) |
| 2138 | cmpwi r7,0x100 |
| 2139 | bge 33f /* another thread already exiting */ |
| 2140 | |
| 2141 | /* |
| 2142 | * Although not specifically required by the architecture, POWER7 |
| 2143 | * preserves the following registers in nap mode, even if an SMT mode |
| 2144 | * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, |
| 2145 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. |
| 2146 | */ |
| 2147 | /* Save non-volatile GPRs */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2148 | std r14, VCPU_GPR(R14)(r3) |
| 2149 | std r15, VCPU_GPR(R15)(r3) |
| 2150 | std r16, VCPU_GPR(R16)(r3) |
| 2151 | std r17, VCPU_GPR(R17)(r3) |
| 2152 | std r18, VCPU_GPR(R18)(r3) |
| 2153 | std r19, VCPU_GPR(R19)(r3) |
| 2154 | std r20, VCPU_GPR(R20)(r3) |
| 2155 | std r21, VCPU_GPR(R21)(r3) |
| 2156 | std r22, VCPU_GPR(R22)(r3) |
| 2157 | std r23, VCPU_GPR(R23)(r3) |
| 2158 | std r24, VCPU_GPR(R24)(r3) |
| 2159 | std r25, VCPU_GPR(R25)(r3) |
| 2160 | std r26, VCPU_GPR(R26)(r3) |
| 2161 | std r27, VCPU_GPR(R27)(r3) |
| 2162 | std r28, VCPU_GPR(R28)(r3) |
| 2163 | std r29, VCPU_GPR(R29)(r3) |
| 2164 | std r30, VCPU_GPR(R30)(r3) |
| 2165 | std r31, VCPU_GPR(R31)(r3) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2166 | |
| 2167 | /* save FP state */ |
Paul Mackerras | 595e4f7 | 2013-10-15 20:43:04 +1100 | [diff] [blame] | 2168 | bl kvmppc_save_fp |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2169 | |
| 2170 | /* |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2171 | * Take a nap until a decrementer, external or doorbell interrupt
Preeti U Murthy | 582b910 | 2014-04-11 16:02:08 +0530 | [diff] [blame] | 2172 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the |
| 2173 | * runlatch bit before napping. |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2174 | */ |
Preeti U Murthy | 582b910 | 2014-04-11 16:02:08 +0530 | [diff] [blame] | 2175 | mfspr r2, SPRN_CTRLF |
| 2176 | clrrdi r2, r2, 1 |
| 2177 | mtspr SPRN_CTRLT, r2 |
| 2178 | |
Paul Mackerras | f0888f7 | 2012-02-03 00:54:17 +0000 | [diff] [blame] | 2179 | li r0,1 |
| 2180 | stb r0,HSTATE_HWTHREAD_REQ(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2181 | mfspr r5,SPRN_LPCR |
| 2182 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2183 | BEGIN_FTR_SECTION |
| 2184 | oris r5,r5,LPCR_PECEDP@h |
| 2185 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2186 | mtspr SPRN_LPCR,r5 |
| 2187 | isync |
| 2188 | li r0, 0 |
| 2189 | std r0, HSTATE_SCRATCH0(r13) |
| 2190 | ptesync |
| 2191 | ld r0, HSTATE_SCRATCH0(r13) |
| 2192 | 1: cmpd r0, r0 |
| 2193 | bne 1b |
| 2194 | nap |
| 2195 | b . |
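|      | /*
|      | * The scratch store / ptesync / reload / compare sequence just
|      | * above appears to be there to guarantee that the store (and
|      | * everything before it) has been performed before the thread
|      | * enters nap: the cmpd r0,r0 loop can never be taken, it just
|      | * sinks the loaded value so the nap cannot overtake the load.
|      | */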
| 2196 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2197 | 33: mr r4, r3 |
| 2198 | li r3, 0 |
| 2199 | li r12, 0 |
| 2200 | b 34f |
| 2201 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2202 | kvm_end_cede: |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2203 | /* get vcpu pointer */ |
| 2204 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2205 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2206 | /* Woken by external or decrementer interrupt */ |
| 2207 | ld r1, HSTATE_HOST_R1(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2208 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2209 | /* load up FP state */ |
| 2210 | bl kvmppc_load_fp |
| 2211 | |
| 2212 | /* Load NV GPRS */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2213 | ld r14, VCPU_GPR(R14)(r4) |
| 2214 | ld r15, VCPU_GPR(R15)(r4) |
| 2215 | ld r16, VCPU_GPR(R16)(r4) |
| 2216 | ld r17, VCPU_GPR(R17)(r4) |
| 2217 | ld r18, VCPU_GPR(R18)(r4) |
| 2218 | ld r19, VCPU_GPR(R19)(r4) |
| 2219 | ld r20, VCPU_GPR(R20)(r4) |
| 2220 | ld r21, VCPU_GPR(R21)(r4) |
| 2221 | ld r22, VCPU_GPR(R22)(r4) |
| 2222 | ld r23, VCPU_GPR(R23)(r4) |
| 2223 | ld r24, VCPU_GPR(R24)(r4) |
| 2224 | ld r25, VCPU_GPR(R25)(r4) |
| 2225 | ld r26, VCPU_GPR(R26)(r4) |
| 2226 | ld r27, VCPU_GPR(R27)(r4) |
| 2227 | ld r28, VCPU_GPR(R28)(r4) |
| 2228 | ld r29, VCPU_GPR(R29)(r4) |
| 2229 | ld r30, VCPU_GPR(R30)(r4) |
| 2230 | ld r31, VCPU_GPR(R31)(r4) |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2231 | |
| 2232 | /* Check the wake reason in SRR1 to see why we got here */ |
| 2233 | bl kvmppc_check_wake_reason |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2234 | |
| 2235 | /* clear our bit in vcore->napping_threads */ |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2236 | 34: ld r5,HSTATE_KVM_VCORE(r13) |
| 2237 | lbz r7,HSTATE_PTID(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2238 | li r0,1 |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2239 | sld r0,r0,r7 |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2240 | addi r6,r5,VCORE_NAPPING_THREADS |
| 2241 | 32: lwarx r7,0,r6 |
| 2242 | andc r7,r7,r0 |
| 2243 | stwcx. r7,0,r6 |
| 2244 | bne 32b |
| 2245 | li r0,0 |
| 2246 | stb r0,HSTATE_NAPPING(r13) |
| 2247 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2248 | /* See if the wake reason means we need to exit */ |
| 2249 | stw r12, VCPU_TRAP(r4) |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2250 | mr r9, r4 |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2251 | cmpdi r3, 0 |
| 2252 | bgt guest_exit_cont |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2253 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2254 | /* see if any other thread is already exiting */ |
| 2255 | lwz r0,VCORE_ENTRY_EXIT(r5) |
| 2256 | cmpwi r0,0x100 |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2257 | bge guest_exit_cont |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2258 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2259 | b kvmppc_cede_reentry /* if not go back to guest */ |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2260 | |
| 2261 | /* cede when already previously prodded case */ |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2262 | kvm_cede_prodded: |
| 2263 | li r0,0 |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2264 | stb r0,VCPU_PRODDED(r3) |
| 2265 | sync /* order testing prodded vs. clearing ceded */ |
| 2266 | stb r0,VCPU_CEDED(r3) |
| 2267 | li r3,H_SUCCESS |
| 2268 | blr |
| 2269 | |
| 2270 | /* we've ceded but we want to give control to the host */ |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2271 | kvm_cede_exit: |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2272 | b hcall_real_fallback |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2273 | |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2274 | /* Try to handle a machine check in real mode */ |
| 2275 | machine_check_realmode: |
| 2276 | mr r3, r9 /* get vcpu pointer */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 2277 | bl kvmppc_realmode_machine_check |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2278 | nop |
Mahesh Salgaonkar | 74845bc | 2014-06-11 14:18:21 +0530 | [diff] [blame] | 2279 | cmpdi r3, 0 /* Did we handle MCE ? */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2280 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2281 | li r12, BOOK3S_INTERRUPT_MACHINE_CHECK |
Mahesh Salgaonkar | 74845bc | 2014-06-11 14:18:21 +0530 | [diff] [blame] | 2282 | /* |
| 2283 | * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
| 2284 | * a machine check interrupt (set HSRR0 to 0x200). For handled
| 2285 | * errors (non-fatal), just go back to guest execution with the
| 2286 | * current HSRR0 instead of exiting the guest. This approach injects
| 2287 | * a machine check into the guest for fatal errors, causing the guest to crash.
| 2288 | *
| 2289 | * The old code used to return to the host for unhandled errors, which
| 2290 | * caused the guest to hang with soft lockups inside the guest and
| 2291 | * made it difficult to recover the guest instance.
| 2292 | */ |
| 2293 | ld r10, VCPU_PC(r9) |
| 2294 | ld r11, VCPU_MSR(r9) |
| 2295 | bne 2f /* Continue guest execution. */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2296 | /* If not, deliver a machine check. SRR0/1 are already set */ |
| 2297 | li r10, BOOK3S_INTERRUPT_MACHINE_CHECK |
Paul Mackerras | 000a25d | 2014-05-26 19:48:41 +1000 | [diff] [blame] | 2298 | ld r11, VCPU_MSR(r9) |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 2299 | bl kvmppc_msr_interrupt |
Mahesh Salgaonkar | 74845bc | 2014-06-11 14:18:21 +0530 | [diff] [blame] | 2300 | 2: b fast_interrupt_c_return |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2301 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2302 | /* |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2303 | * Check the reason we woke from nap, and take appropriate action. |
| 2304 | * Returns: |
| 2305 | * 0 if nothing needs to be done |
| 2306 | * 1 if something happened that needs to be handled by the host |
| 2307 | * -1 if there was a guest wakeup (IPI) |
| 2308 | * |
| 2309 | * Also sets r12 to the interrupt vector for any interrupt that needs |
| 2310 | * to be handled now by the host (0x500 for external interrupt), or zero. |
| 2311 | */ |
| 2312 | kvmppc_check_wake_reason: |
| 2313 | mfspr r6, SPRN_SRR1 |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2314 | BEGIN_FTR_SECTION |
| 2315 | rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ |
| 2316 | FTR_SECTION_ELSE |
| 2317 | rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ |
| 2318 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) |
| 2319 | cmpwi r6, 8 /* was it an external interrupt? */ |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2320 | li r12, BOOK3S_INTERRUPT_EXTERNAL |
| 2321 | beq kvmppc_read_intr /* if so, see what it was */ |
| 2322 | li r3, 0 |
| 2323 | li r12, 0 |
| 2324 | cmpwi r6, 6 /* was it the decrementer? */ |
| 2325 | beq 0f |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2326 | BEGIN_FTR_SECTION |
| 2327 | cmpwi r6, 5 /* privileged doorbell? */ |
| 2328 | beq 0f |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2329 | cmpwi r6, 3 /* hypervisor doorbell? */ |
| 2330 | beq 3f |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2331 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2332 | li r3, 1 /* anything else, return 1 */ |
| 2333 | 0: blr |
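|      | /*
|      | * Wake-reason encodings tested above (SRR1 wake field values):
|      | *   8 = external interrupt  -> go look at the ICP (0x500 path)
|      | *   6 = decrementer         -> nothing further to do here
|      | *   5 = privileged doorbell -> nothing further to do (POWER8)
|      | *   3 = hypervisor doorbell -> return 1 to exit to host (POWER8)
|      | * anything else returns 1 so the host can have a look.
|      | */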
| 2334 | |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2335 | /* hypervisor doorbell */ |
| 2336 | 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL |
| 2337 | li r3, 1 |
| 2338 | blr |
| 2339 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2340 | /* |
Paul Mackerras | c934243 | 2013-09-06 13:24:13 +1000 | [diff] [blame] | 2341 | * Determine what sort of external interrupt is pending (if any). |
| 2342 | * Returns: |
| 2343 | * 0 if no interrupt is pending |
| 2344 | * 1 if an interrupt is pending that needs to be handled by the host |
| 2345 | * -1 if there was a guest wakeup IPI (which has now been cleared) |
| 2346 | */ |
| 2347 | kvmppc_read_intr: |
| 2348 | /* see if a host IPI is pending */ |
| 2349 | li r3, 1 |
| 2350 | lbz r0, HSTATE_HOST_IPI(r13) |
| 2351 | cmpwi r0, 0 |
| 2352 | bne 1f |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2353 | |
Paul Mackerras | c934243 | 2013-09-06 13:24:13 +1000 | [diff] [blame] | 2354 | /* Now read the interrupt from the ICP */ |
| 2355 | ld r6, HSTATE_XICS_PHYS(r13) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2356 | li r7, XICS_XIRR |
Paul Mackerras | c934243 | 2013-09-06 13:24:13 +1000 | [diff] [blame] | 2357 | cmpdi r6, 0 |
| 2358 | beq- 1f |
| 2359 | lwzcix r0, r6, r7 |
Alexander Graf | 76d072f | 2014-06-11 10:37:52 +0200 | [diff] [blame] | 2360 | /* |
| 2361 | * Save XIRR for later. Since we get it in reverse endian on LE
| 2362 | * systems, save it byte reversed and fetch it back in host endian. |
| 2363 | */ |
| 2364 | li r3, HSTATE_SAVED_XIRR |
| 2365 | STWX_BE r0, r3, r13 |
| 2366 | #ifdef __LITTLE_ENDIAN__ |
| 2367 | lwz r3, HSTATE_SAVED_XIRR(r13) |
| 2368 | #else |
| 2369 | mr r3, r0 |
| 2370 | #endif |
| 2371 | rlwinm. r3, r3, 0, 0xffffff |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2372 | sync |
Paul Mackerras | c934243 | 2013-09-06 13:24:13 +1000 | [diff] [blame] | 2373 | beq 1f /* if nothing pending in the ICP */ |
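	/*
	 * XIRR layout, for reference (a sketch of the XICS register
	 * format, not state defined in this file): the top byte is the
	 * CPPR (current processor priority), the low 24 bits are the
	 * XISR source number.  The rlwinm. above masks off the CPPR so
	 * the branch tests whether any source is actually pending.
	 */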

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check the host IPI now in case it got set in
	 * the meantime.  If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host.  We saved a copy of
	 * the XIRR in the PACA earlier; it will be picked up by the
	 * host ICP driver.
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host; we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
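	/*
	 * The MSR now has FP (and, where the CPU supports them,
	 * VEC/VSX) enabled; the isync ensures the facility enables
	 * take effect before the register state is touched below.
	 */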
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31			/* return the vcpu pointer in r4 */
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* check if we are in transactional state */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
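
	/*
	 * Worked example of the TS handling above (a sketch derived
	 * from the MSR_TS_*_LG definitions, not additional code): the
	 * rldicl pulls the two-bit MSR[TS] field down into r0, so
	 * transactional = 0b10 = 2 and suspended = 0b01 = 1.  An
	 * interrupt must not leave the CPU transactional, so 2 is
	 * rewritten to 1; any other TS value is re-inserted unchanged
	 * into the new MSR by the rldimi.
	 */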

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
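/*
 * (A PMC raises its overflow condition when its most-significant bit
 * becomes set, so loading PMC6 with 0x7fffffff below leaves it one
 * count away from generating the interrupt.)
 */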
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3		/* clear any per-PMC freeze conditions */
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3		/* enable the PM exception, let PMC5/6 run */
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3		/* 0x7fffffff: one count from overflow */
	isync
	blr