/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

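/*
 * Note: on POWER9 (CPU_FTR_ARCH_300) the hypervisor decrementer is a
 * full 64-bit register, so the value read with mfspr can be compared
 * directly; on POWER7/8 HDEC is only 32 bits wide and must be
 * sign-extended before any 64-bit signed comparison.  As a rough C
 * sketch of what the macro does (illustrative only; cpu_has_feature()
 * stands in for the runtime feature-section patching):
 *
 *	s64 hdec = mfspr(SPRN_HDEC);
 *	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 *		hdec = (s64)(s32)hdec;		// extsw
 */
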
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

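/*
 * Note on the trampoline above: SRR0/SRR1 are loaded with the target
 * PC (kvmppc_call_hv_entry) and a target MSR with IR/DR cleared, so
 * the RFI both branches and turns address translation off in one
 * atomic step.  RI is cleared in the current MSR first so that an
 * interrupt landing between the mtsrr0/1 and the RFI cannot silently
 * clobber SRR0/SRR1.
 */
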
kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

	/* Virtual-mode return - can't get here for HMI or machine check */
.Lvirt_return:
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	16f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	17f
	andi.	r0, r7, MSR_EE		/* were interrupts hard-enabled? */
	beq	18f
	mtmsrd	r7, 1			/* if so then re-enable them */
18:	mtlr	r8
	blr

16:	mtspr	SPRN_HSRR0, r8		/* jump to reloc-on external vector */
	mtspr	SPRN_HSRR1, r7
	b	exc_virt_0x4500_hardware_interrupt

17:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	exc_virt_0x4e80_h_doorbell

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

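/*
 * The napping_threads update above is the usual lwarx/stwcx. atomic
 * read-modify-write; each hardware thread owns one bit, indexed by
 * its PTID.  A rough C equivalent (illustrative only; the
 * store_conditional() helper is shorthand for stwcx., not a real
 * kernel function):
 *
 *	do {
 *		old = vcore->napping_threads;		// lwarx
 *		new = old | (1u << ptid);
 *	} while (!store_conditional(&vcore->napping_threads, new));
 *
 * The isync afterwards orders the bitmap update before the read of
 * the entry/exit word, matching the barrier on the waking side.
 */
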
/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  Hence the HMI keeps waking up
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them.  This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go
	 * to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/* Clear out SLB if hash */
	bne	cr7, 2f
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync
2:
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

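	/*
	 * The entry/exit word updated above packs two bitmaps: roughly,
	 * the low byte is the set of threads that have entered the guest
	 * and the next byte the set of threads that have started exiting,
	 * so any value >= 0x100 means some thread is already on the way
	 * out.  An illustrative (not literal) C version of the loop,
	 * again using store_conditional() as shorthand for stwcx.:
	 *
	 *	do {
	 *		map = vcore->entry_exit_map;	// lwarx
	 *		if (map >= 0x100)
	 *			goto secondary_too_late;
	 *	} while (!store_conditional(&vcore->entry_exit_map,
	 *				    map | (1u << ptid)));
	 */
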
	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

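	/*
	 * The sequence above is effectively a real-mode open-coding of
	 * the following (illustrative C, using the generic bitmap
	 * helpers only by analogy):
	 *
	 *	if (test_bit(pcpu, kvm->arch.need_tlb_flush)) {
	 *		for (set = 0; set < kvm->arch.tlb_sets; set++)
	 *			tlbiel(set);	// flush this LPID's entries
	 *		clear_bit(pcpu, kvm->arch.need_tlb_flush);
	 *	}
	 *
	 * with the ldarx/stdcx. loop providing the atomic clear_bit().
	 */
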
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

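	/*
	 * Note on the TBU40 update above: mtspr TBU40 writes only the
	 * upper 40 bits of the timebase, leaving the low 24 bits running.
	 * If the low 24 bits happened to wrap between the mftb and the
	 * mtspr, the carry into the upper bits would be lost, so we
	 * compare the low 24 bits before and after and, if they went
	 * backwards, add 1 to the upper 40 bits (addis ...,0x100 is
	 * 1 << 24 here).
	 */
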
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	std	r3,VCPU_DEC(r4)

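	/*
	 * The expiry time is kept as a host timebase value, so the
	 * sequence above is, in effect (illustrative C only, with
	 * mfspr/mtspr/mftb as pseudo-functions):
	 *
	 *	guest_expiry = vcpu->arch.dec_expires + vcore->tb_offset;
	 *	mtspr(SPRN_DEC, guest_expiry - mftb());
	 *
	 * i.e. convert the host TB expiry to guest TB, then program the
	 * decrementer with the remaining ticks.
	 */
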
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, r0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	stdcix	r11,r9,r10
	eieio
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stw	r9, VCPU_XIVE_PUSHED(r4)
no_xive:
#endif /* CONFIG_KVM_XICS */

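	/*
	 * Note on the XIVE push above: we store the vcpu's saved OS
	 * context and CAM word into the OS ring (TM_QW1_OS) of this
	 * thread's interrupt management area.  The cache-inhibited
	 * store forms (stdcix/stwcix) are used because we are running
	 * with the MMU off here and HSTATE_XIVE_TIMA_PHYS is a real
	 * address.  VCPU_XIVE_PUSHED records that the context must be
	 * pulled back on exit (see guest_exit_cont).
	 */
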
deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

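	/*
	 * Note for the code below: if an external interrupt is pending
	 * but the guest has MSR_EE clear, we cannot inject it now;
	 * instead we set LPCR[MER] (mediated external request), which
	 * causes the interrupt to be delivered as soon as the guest
	 * re-enables EE.  A pending decrementer (or a deliverable
	 * external) is injected here directly by rewriting SRR0/SRR1
	 * and the guest MSR via kvmppc_msr_interrupt.
	 */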
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

1091 * Required state:
1092 * R4 = vcpu
1093 * R10: value for HSRR0
1094 * R11: value for HSRR1
1095 * R13 = PACA
1096 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001097fast_guest_return:
Paul Mackerras4619ac82013-04-17 20:31:41 +00001098 li r0,0
1099 stb r0,VCPU_CEDED(r4) /* cancel cede */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001100 mtspr SPRN_HSRR0,r10
1101 mtspr SPRN_HSRR1,r11
1102
1103 /* Activate guest mode, so faults get handled by KVM */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001104 li r9, KVM_GUEST_MODE_GUEST_HV
Paul Mackerrasde56a942011-06-29 00:21:34 +00001105 stb r9, HSTATE_IN_GUEST(r13)
1106
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001107#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1108 /* Accumulate timing */
1109 addi r3, r4, VCPU_TB_GUEST
1110 bl kvmhv_accumulate_time
1111#endif
1112
Paul Mackerrasde56a942011-06-29 00:21:34 +00001113 /* Enter guest */
1114
Paul Mackerras0acb9112013-02-04 18:10:51 +00001115BEGIN_FTR_SECTION
1116 ld r5, VCPU_CFAR(r4)
1117 mtspr SPRN_CFAR, r5
1118END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001119BEGIN_FTR_SECTION
1120 ld r0, VCPU_PPR(r4)
1121END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001122
Paul Mackerrasde56a942011-06-29 00:21:34 +00001123 ld r5, VCPU_LR(r4)
1124 lwz r6, VCPU_CR(r4)
1125 mtlr r5
1126 mtcr r6
1127
Michael Neulingc75df6f2012-06-25 13:33:10 +00001128 ld r1, VCPU_GPR(R1)(r4)
1129 ld r2, VCPU_GPR(R2)(r4)
1130 ld r3, VCPU_GPR(R3)(r4)
1131 ld r5, VCPU_GPR(R5)(r4)
1132 ld r6, VCPU_GPR(R6)(r4)
1133 ld r7, VCPU_GPR(R7)(r4)
1134 ld r8, VCPU_GPR(R8)(r4)
1135 ld r9, VCPU_GPR(R9)(r4)
1136 ld r10, VCPU_GPR(R10)(r4)
1137 ld r11, VCPU_GPR(R11)(r4)
1138 ld r12, VCPU_GPR(R12)(r4)
1139 ld r13, VCPU_GPR(R13)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001140
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001141BEGIN_FTR_SECTION
1142 mtspr SPRN_PPR, r0
1143END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1144 ld r0, VCPU_GPR(R0)(r4)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001145 ld r4, VCPU_GPR(R4)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001146
1147 hrfid
1148 b .
1149
secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001354 * Before returning to guest, we check if any CPU is heading out
1355 * to the host and if so, we head out also. If no CPUs are heading
1356 * check return values <= 0.
1357 *
1358 * Return to guest (r3 <= 0)
1359 * 0 No external interrupt is pending
1360 * -1 A guest wakeup IPI (which has now been cleared)
1361 * In either case, we return to guest to deliver any pending
1362 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001363 *
1364 * -2 A PCI passthrough external interrupt was handled
1365 * (interrupt was delivered directly to guest)
1366 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001367 */
1368
Suresh Warrierf7af5202016-08-19 15:35:52 +10001369 cmpdi r3, 1
1370 ble 1f
1371
1372 /* Return code = 2 */
1373 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1374 stw r12, VCPU_TRAP(r9)
1375 b guest_exit_cont
1376
13771: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001378 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001379 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001380
Suresh Warrier37f55d32016-08-19 15:35:46 +10001381 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110013824: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001383 lwz r0, VCORE_ENTRY_EXIT(r5)
1384 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001385 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001386 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001387
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001388guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001389#ifdef CONFIG_KVM_XICS
1390 /* We are exiting, pull the VP from the XIVE */
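	/*
	 * The pull is done with two loads on the thread management area
	 * (TIMA): a load at offset TM_SPC_PULL_OS_CTX detaches the vcpu's
	 * OS context from the hardware thread, then a load at TM_QW1_OS
	 * fetches words 0-1 of that context so they can be saved in the
	 * vcpu struct.  The cache-inhibited load forms (lwzcix/ldcix) are
	 * used when the MMU is off and we must go through the physical
	 * address of the TIMA.
	 */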
	lwz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzx	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzcix	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stw	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1:
#endif /* CONFIG_KVM_XICS */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)
	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	r0, 0
	li	r5, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
3:	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
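	/* -1 with the low 10 bits cleared sets all 54 (9 x 6) freeze bits */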
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
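	/*
	 * slbia invalidates every SLB entry except entry 0, so entry 0 is
	 * cleared explicitly first with an slbmte of all zeroes; the
	 * ptesync orders the invalidations before the partition switch.
	 */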
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler will have
	 * resynchronized the timebase, so there is no need to subtract
	 * the guest timebase offset from it here; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
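	/*
	 * mtspr TBU40 writes only the upper 40 bits of the timebase,
	 * leaving the low 24 bits unchanged.  If the low 24 bits happen
	 * to wrap while we are updating, the upper bits end up one too
	 * small, so re-read TB below and add the missing carry (addis of
	 * 0x100 adds 2^24, i.e. 1 in the lowest of the upper 40 bits).
	 */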
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00001996
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001997.Lradix_hdsi:
1998 std r4, VCPU_FAULT_DAR(r9)
1999 stw r6, VCPU_FAULT_DSISR(r9)
2000.Lradix_hisi:
2001 mfspr r5, SPRN_ASDR
2002 std r5, VCPU_FAULT_GPA(r9)
2003 b guest_exit_cont
2004
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002005/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002006 * Similarly for an HISI, reflect it to the guest as an ISI unless
2007 * it is an HPTE not found fault for a page that we have paged out.
2008 */
2009kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002010 ld r3, VCPU_KVM(r9)
2011 lbz r0, KVM_RADIX(r3)
2012 cmpwi r0, 0
2013 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002014 andis. r0, r11, SRR1_ISI_NOPT@h
2015 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002016 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2017 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002018BEGIN_FTR_SECTION
2019 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2020 b 4f
2021END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002022 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002023 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002024 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2025 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000020264:
2027 /* Search the hash table. */
2028 mr r3, r9 /* vcpu pointer */
2029 mr r4, r10
2030 mr r6, r11
2031 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002032 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002033 ld r9, HSTATE_KVM_VCPU(r13)
2034 ld r10, VCPU_PC(r9)
2035 ld r11, VCPU_MSR(r9)
2036 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2037 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002038 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002039 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002040 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002041
Paul Mackerrascf29b212015-10-27 16:10:20 +11002042 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002043 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110020441: li r0, BOOK3S_INTERRUPT_INST_STORAGE
20457: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002046 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002047 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002048 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002049 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002050
20513: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2052 ld r5, KVM_VRMA_SLB_V(r6)
2053 b 4b
2054
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
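	/*
	 * The sequence below is, roughly, the assembly equivalent of
	 * this C sketch (one bit per possible hcall number; hcall
	 * numbers are multiples of 4):
	 *
	 *	if (!((kvm->arch.enabled_hcalls[(nr / 4) >> 6]
	 *	       >> ((nr / 4) & 0x3f)) & 1))
	 *		goto guest_exit_cont;
	 */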
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler has punted
	 * it back to userspace.  We need to restore some clobbered
	 * volatiles before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
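	/*
	 * One 32-bit entry per possible hcall number (hcall numbers are
	 * multiples of 4, so the number itself indexes the table).  Each
	 * entry is the offset of the real-mode handler from the start of
	 * this table, or 0 if the hcall is not handled in real mode.
	 */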
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X*/
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
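	/*
	 * Validate the DABRX value in r5: it must select at least one of
	 * user/kernel mode and must not have any bits set outside USER,
	 * KERNEL and BTI, otherwise the hcall fails with H_PARAMETER.
	 */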
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
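	/*
	 * Roughly, the lwarx/stwcx. loop below is this C sketch (here
	 * the low byte of entry_exit_map is assumed to be the bitmap of
	 * threads that have entered the guest):
	 *
	 *	do {
	 *		old = vc->napping_threads;
	 *		new = old | (1 << ptid);
	 *		if (new == (vc->entry_exit_map & 0xff))
	 *			goto kvm_cede_exit;  // all threads napping
	 *	} while (!atomic_update(&vc->napping_threads, old, new));
	 */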
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
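	/*
	 * The store/ptesync/load-back sequence above ensures our prior
	 * stores have completed before the thread goes to sleep; the
	 * cmpd of r0 with itself can never fail, so the loop exists only
	 * to force completion of the load before nap/stop executes.
	 */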
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the
	 * guest.  This approach injects a machine check into the guest
	 * for a fatal error, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0) then deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2691 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10002692 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002693 li r3, 0
2694 li r12, 0
2695 cmpwi r6, 6 /* was it the decrementer? */
2696 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002697BEGIN_FTR_SECTION
2698 cmpwi r6, 5 /* privileged doorbell? */
2699 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002700 cmpwi r6, 3 /* hypervisor doorbell? */
2701 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002702END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302703	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
2704 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002705 li r3, 1 /* anything else, return 1 */
27060: blr
2707
Paul Mackerras5d00f662014-01-08 21:25:28 +11002708 /* hypervisor doorbell */
27093: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302710
2711 /*
2712 * Clear the doorbell as we will invoke the handler
2713 * explicitly in the guest exit path.
2714 */
2715 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2716 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11002717 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11002718 li r3, 1
Paul Mackerras66feed62015-03-28 14:21:12 +11002719 lbz r0, HSTATE_HOST_IPI(r13)
2720 cmpwi r0, 0
2721 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302722 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11002723 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11002724 blr
2725
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302726 /* Woken up due to Hypervisor maintenance interrupt */
27274: li r12, BOOK3S_INTERRUPT_HMI
2728 li r3, 1
2729 blr
2730
Suresh Warrier37f55d32016-08-19 15:35:46 +10002731 /* external interrupt - create a stack frame so we can call C */
27327: mflr r0
2733 std r0, PPC_LR_STKOFF(r1)
2734 stdu r1, -PPC_MIN_STKFRM(r1)
2735 bl kvmppc_read_intr
2736 nop
2737 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10002738 cmpdi r3, 1
2739 ble 1f
2740
2741 /*
2742	 * A return code of 2 means a PCI passthrough interrupt, but
2743	 * we need to return to the host to complete handling the
2744	 * interrupt. The guest exit code expects the trap reason
2745	 * in r12.
2746 */
2747 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
27481:
Suresh Warrier37f55d32016-08-19 15:35:46 +10002749 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2750 addi r1, r1, PPC_MIN_STKFRM
2751 mtlr r0
2752 blr
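For reference, here is the decode above in C, assuming the POWER8 SRR1 layout (the rlwinm extracts the 4-bit wake-reason field, SRR1 bits 21:18, into bits 3:0). The helper name and the simplification of the external-interrupt case are illustrative:

	/* Return values follow the convention documented above:
	 * 0 = nothing to do, 1 = host must handle, -1 = guest wakeup. */
	static int check_wake_reason_p8(unsigned long srr1, int host_ipi)
	{
		unsigned int reason = (srr1 >> 18) & 0xf;

		switch (reason) {
		case 6:			/* decrementer */
		case 5:			/* privileged doorbell */
			return 0;
		case 3:			/* hypervisor doorbell: msgclr,
					 * then host IPI vs. guest wakeup */
			return host_ipi ? 1 : -1;
		case 8:			/* external: the real code calls
					 * kvmppc_read_intr() here */
		case 0xa:		/* hypervisor maintenance */
		default:
			return 1;
		}
	}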
Paul Mackerrasde56a942011-06-29 00:21:34 +00002753
2754/*
2755 * Save away FP, VMX and VSX registers.
2756 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002757 * N.B. r30 and r31 are volatile across this function,
2758 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002759 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002760kvmppc_save_fp:
2761 mflr r30
2762 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00002763 mfmsr r5
2764 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00002765#ifdef CONFIG_ALTIVEC
2766BEGIN_FTR_SECTION
2767 oris r8,r8,MSR_VEC@h
2768END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2769#endif
2770#ifdef CONFIG_VSX
2771BEGIN_FTR_SECTION
2772 oris r8,r8,MSR_VSX@h
2773END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2774#endif
2775 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002776 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002777 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002778#ifdef CONFIG_ALTIVEC
2779BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002780 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002781 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002782END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2783#endif
2784 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11002785 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11002786 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00002787 blr
2788
2789/*
2790 * Load up FP, VMX and VSX registers
2791 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002792 * N.B. r30 and r31 are volatile across this function,
2793 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002794 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002795kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002796 mflr r30
2797 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002798 mfmsr r9
2799 ori r8,r9,MSR_FP
2800#ifdef CONFIG_ALTIVEC
2801BEGIN_FTR_SECTION
2802 oris r8,r8,MSR_VEC@h
2803END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2804#endif
2805#ifdef CONFIG_VSX
2806BEGIN_FTR_SECTION
2807 oris r8,r8,MSR_VSX@h
2808END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2809#endif
2810 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002811 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002812 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002813#ifdef CONFIG_ALTIVEC
2814BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002815 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002816 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002817END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2818#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11002819 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002820 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002821 mtlr r30
2822 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002823 blr
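Both routines above follow the enable-then-touch pattern: FP (and, where present, VEC/VSX) must be enabled in the MSR before the corresponding register files can be accessed. A C-level sketch of the save side using the kernel's store_fp_state/store_vr_state helpers; the function name is invented and this is an outline, not the actual implementation:

	static void save_guest_fp_sketch(struct kvm_vcpu *vcpu)
	{
		unsigned long msr = mfmsr() | MSR_FP;	/* enable FP first */

		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			msr |= MSR_VEC;
		if (cpu_has_feature(CPU_FTR_VSX))
			msr |= MSR_VSX;
		mtmsr(msr);				/* mtmsrd in the asm */

		store_fp_state(&vcpu->arch.fp);		/* VCPU_FPRS */
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			store_vr_state(&vcpu->arch.vr);	/* VCPU_VRS */
		vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);	/* VCPU_VRSAVE */
	}

kvmppc_load_fp is the mirror image, using load_fp_state/load_vr_state and writing VRSAVE back with mtspr.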
Paul Mackerras44a3add2013-10-04 21:45:04 +10002824
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002825#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2826/*
2827 * Save transactional state and TM-related registers.
2828 * Called with r9 pointing to the vcpu struct.
2829 * This can modify all checkpointed registers, but
2830 * restores r1, r2 and r9 (vcpu pointer) before exit.
2831 */
2832kvmppc_save_tm:
2833 mflr r0
2834 std r0, PPC_LR_STKOFF(r1)
2835
2836 /* Turn on TM. */
2837 mfmsr r8
2838 li r0, 1
2839 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
2840 mtmsrd r8
2841
2842 ld r5, VCPU_MSR(r9)
2843 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2844 beq 1f /* TM not active in guest. */
2845
2846 std r1, HSTATE_HOST_R1(r13)
2847 li r3, TM_CAUSE_KVM_RESCHED
2848
2849 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
2850 li r5, 0
2851 mtmsrd r5, 1
2852
2853 /* All GPRs are volatile at this point. */
2854 TRECLAIM(R3)
2855
2856 /* Temporarily store r13 and r9 so we have some regs to play with */
2857 SET_SCRATCH0(r13)
2858 GET_PACA(r13)
2859 std r9, PACATMSCRATCH(r13)
2860 ld r9, HSTATE_KVM_VCPU(r13)
2861
2862 /* Get a few more GPRs free. */
2863 std r29, VCPU_GPRS_TM(29)(r9)
2864 std r30, VCPU_GPRS_TM(30)(r9)
2865 std r31, VCPU_GPRS_TM(31)(r9)
2866
2867	/* Save away PPR and DSCR soon so we don't run with user values. */
2868 mfspr r31, SPRN_PPR
2869 HMT_MEDIUM
2870 mfspr r30, SPRN_DSCR
2871 ld r29, HSTATE_DSCR(r13)
2872 mtspr SPRN_DSCR, r29
2873
2874 /* Save all but r9, r13 & r29-r31 */
2875 reg = 0
2876 .rept 29
2877 .if (reg != 9) && (reg != 13)
2878 std reg, VCPU_GPRS_TM(reg)(r9)
2879 .endif
2880 reg = reg + 1
2881 .endr
2882 /* ... now save r13 */
2883 GET_SCRATCH0(r4)
2884 std r4, VCPU_GPRS_TM(13)(r9)
2885 /* ... and save r9 */
2886 ld r4, PACATMSCRATCH(r13)
2887 std r4, VCPU_GPRS_TM(9)(r9)
2888
2889 /* Reload stack pointer and TOC. */
2890 ld r1, HSTATE_HOST_R1(r13)
2891 ld r2, PACATOC(r13)
2892
2893 /* Set MSR RI now we have r1 and r13 back. */
2894 li r5, MSR_RI
2895 mtmsrd r5, 1
2896
2897	/* Save away checkpointed SPRs. */
2898 std r31, VCPU_PPR_TM(r9)
2899 std r30, VCPU_DSCR_TM(r9)
2900 mflr r5
2901 mfcr r6
2902 mfctr r7
2903 mfspr r8, SPRN_AMR
2904 mfspr r10, SPRN_TAR
Paul Mackerras0d808df2016-11-07 15:09:58 +11002905 mfxer r11
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002906 std r5, VCPU_LR_TM(r9)
2907 stw r6, VCPU_CR_TM(r9)
2908 std r7, VCPU_CTR_TM(r9)
2909 std r8, VCPU_AMR_TM(r9)
2910 std r10, VCPU_TAR_TM(r9)
Paul Mackerras0d808df2016-11-07 15:09:58 +11002911 std r11, VCPU_XER_TM(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002912
2913 /* Restore r12 as trap number. */
2914 lwz r12, VCPU_TRAP(r9)
2915
2916 /* Save FP/VSX. */
2917 addi r3, r9, VCPU_FPRS_TM
2918 bl store_fp_state
2919 addi r3, r9, VCPU_VRS_TM
2920 bl store_vr_state
2921 mfspr r6, SPRN_VRSAVE
2922 stw r6, VCPU_VRSAVE_TM(r9)
29231:
2924 /*
2925 * We need to save these SPRs after the treclaim so that the software
2926 * error code is recorded correctly in the TEXASR. Also the user may
2927 * change these outside of a transaction, so they must always be
2928 * context switched.
2929 */
2930 mfspr r5, SPRN_TFHAR
2931 mfspr r6, SPRN_TFIAR
2932 mfspr r7, SPRN_TEXASR
2933 std r5, VCPU_TFHAR(r9)
2934 std r6, VCPU_TFIAR(r9)
2935 std r7, VCPU_TEXASR(r9)
2936
2937 ld r0, PPC_LR_STKOFF(r1)
2938 mtlr r0
2939 blr
2940
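The early-exit test in kvmppc_save_tm checks the MSR transaction-state field; the treclaim and the checkpointed GPR/FP/VR saves run only when a guest transaction was in flight (suspended or transactional). A sketch of that test, with the Book3S-64 constants written out (MSR_TS_S_LG = 33, MSR_TS_T_LG = 34):

	#define MSR_TS_S	(1UL << 33)	/* suspended */
	#define MSR_TS_T	(1UL << 34)	/* transactional */
	#define MSR_TS_MASK	(MSR_TS_S | MSR_TS_T)

	/* Mirrors "rldicl. r5,r5,64-MSR_TS_S_LG,62; beq 1f". Note that
	 * TFHAR/TFIAR/TEXASR are saved either way, since userspace can
	 * change them outside a transaction. */
	static int guest_tm_active(unsigned long guest_msr)
	{
		return (guest_msr & MSR_TS_MASK) != 0;
	}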
2941/*
2942 * Restore transactional state and TM-related registers.
2943 * Called with r4 pointing to the vcpu struct.
2944 * This potentially modifies all checkpointed registers.
2945 * It restores r1, r2, r4 from the PACA.
2946 */
2947kvmppc_restore_tm:
2948 mflr r0
2949 std r0, PPC_LR_STKOFF(r1)
2950
2951 /* Turn on TM/FP/VSX/VMX so we can restore them. */
2952 mfmsr r5
2953 li r6, MSR_TM >> 32
2954 sldi r6, r6, 32
2955 or r5, r5, r6
2956 ori r5, r5, MSR_FP
2957 oris r5, r5, (MSR_VEC | MSR_VSX)@h
2958 mtmsrd r5
2959
2960 /*
2961 * The user may change these outside of a transaction, so they must
2962 * always be context switched.
2963 */
2964 ld r5, VCPU_TFHAR(r4)
2965 ld r6, VCPU_TFIAR(r4)
2966 ld r7, VCPU_TEXASR(r4)
2967 mtspr SPRN_TFHAR, r5
2968 mtspr SPRN_TFIAR, r6
2969 mtspr SPRN_TEXASR, r7
2970
2971 ld r5, VCPU_MSR(r4)
2972 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2973 beqlr /* TM not active in guest */
2974 std r1, HSTATE_HOST_R1(r13)
2975
2976 /* Make sure the failure summary is set, otherwise we'll program check
2977	 * when we trechkpt. It's possible that this might not have been set
2978 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
2979 * host.
2980 */
2981 oris r7, r7, (TEXASR_FS)@h
2982 mtspr SPRN_TEXASR, r7
2983
2984 /*
2985 * We need to load up the checkpointed state for the guest.
2986 * We need to do this early as it will blow away any GPRs, VSRs and
2987 * some SPRs.
2988 */
2989
2990 mr r31, r4
2991 addi r3, r31, VCPU_FPRS_TM
2992 bl load_fp_state
2993 addi r3, r31, VCPU_VRS_TM
2994 bl load_vr_state
2995 mr r4, r31
2996 lwz r7, VCPU_VRSAVE_TM(r4)
2997 mtspr SPRN_VRSAVE, r7
2998
2999 ld r5, VCPU_LR_TM(r4)
3000 lwz r6, VCPU_CR_TM(r4)
3001 ld r7, VCPU_CTR_TM(r4)
3002 ld r8, VCPU_AMR_TM(r4)
3003 ld r9, VCPU_TAR_TM(r4)
Paul Mackerras0d808df2016-11-07 15:09:58 +11003004 ld r10, VCPU_XER_TM(r4)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003005 mtlr r5
3006 mtcr r6
3007 mtctr r7
3008 mtspr SPRN_AMR, r8
3009 mtspr SPRN_TAR, r9
Paul Mackerras0d808df2016-11-07 15:09:58 +11003010 mtxer r10
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003011
3012 /*
3013 * Load up PPR and DSCR values but don't put them in the actual SPRs
3014 * till the last moment to avoid running with userspace PPR and DSCR for
3015 * too long.
3016 */
3017 ld r29, VCPU_DSCR_TM(r4)
3018 ld r30, VCPU_PPR_TM(r4)
3019
3020 std r2, PACATMSCRATCH(r13) /* Save TOC */
3021
3022 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
3023 li r5, 0
3024 mtmsrd r5, 1
3025
3026 /* Load GPRs r0-r28 */
3027 reg = 0
3028 .rept 29
3029 ld reg, VCPU_GPRS_TM(reg)(r31)
3030 reg = reg + 1
3031 .endr
3032
3033 mtspr SPRN_DSCR, r29
3034 mtspr SPRN_PPR, r30
3035
3036 /* Load final GPRs */
3037 ld 29, VCPU_GPRS_TM(29)(r31)
3038 ld 30, VCPU_GPRS_TM(30)(r31)
3039 ld 31, VCPU_GPRS_TM(31)(r31)
3040
3041	/* TM checkpointed state is now set up. All GPRs are now volatile. */
3042 TRECHKPT
3043
3044 /* Now let's get back the state we need. */
3045 HMT_MEDIUM
3046 GET_PACA(r13)
3047 ld r29, HSTATE_DSCR(r13)
3048 mtspr SPRN_DSCR, r29
3049 ld r4, HSTATE_KVM_VCPU(r13)
3050 ld r1, HSTATE_HOST_R1(r13)
3051 ld r2, PACATMSCRATCH(r13)
3052
3053 /* Set the MSR RI since we have our registers back. */
3054 li r5, MSR_RI
3055 mtmsrd r5, 1
3056
3057 ld r0, PPC_LR_STKOFF(r1)
3058 mtlr r0
3059 blr
3060#endif
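One subtlety in the restore path above: trechkpt takes a TM Bad Thing program check if TEXASR[FS] is clear, which is why the failure summary is forced on before the TRECHKPT. A sketch of that guard (FS is IBM bit 36 of TEXASR, i.e. 1 << 27 counting from bit 0):

	#define TEXASR_FS	(1UL << (63 - 36))	/* failure summary */

	/* Mirrors "oris r7,r7,(TEXASR_FS)@h; mtspr SPRN_TEXASR,r7": set FS
	 * even if userspace left it clear via kvmppc_set_one_reg(), so the
	 * trechkpt cannot crash the host. */
	static unsigned long texasr_force_fs(unsigned long texasr)
	{
		return texasr | TEXASR_FS;
	}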
3061
Paul Mackerras44a3add2013-10-04 21:45:04 +10003062/*
3063 * We come here if we get any exception or interrupt while we are
3064 * executing host real-mode code in guest MMU context.
3065 * For now just spin, but we should do something better.
3066 */
3067kvmppc_bad_host_intr:
3068 b .
Michael Neulinge4e38122014-03-25 10:47:02 +11003069
3070/*
3071 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3072 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3073 * r11 has the guest MSR value (in/out)
3074 * r9 has a vcpu pointer (in)
3075 * r0 is used as a scratch register
3076 */
3077kvmppc_msr_interrupt:
3078 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3079 cmpwi r0, 2 /* Check if we are in transactional state.. */
3080 ld r11, VCPU_INTR_MSR(r9)
3081 bne 1f
3082 /* ... if transactional, change to suspended */
3083 li r0, 1
30841: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3085 blr
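In C terms, kvmppc_msr_interrupt starts from VCPU_INTR_MSR and carries the transaction-state field over from the old MSR, demoting transactional (0b10) to suspended (0b01) just as hardware does when delivering an interrupt. An illustrative rendering:

	#define MSR_TS_S_LG	33

	static unsigned long msr_interrupt_sketch(unsigned long old_msr,
						  unsigned long intr_msr)
	{
		unsigned long ts = (old_msr >> MSR_TS_S_LG) & 3; /* MSR[TS] */

		if (ts == 2)			/* transactional -> suspended */
			ts = 1;
		intr_msr &= ~(3UL << MSR_TS_S_LG);
		return intr_msr | (ts << MSR_TS_S_LG);
	}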
Paul Mackerras9bc01a92014-05-26 19:48:40 +10003086
3087/*
3088 * This works around a hardware bug on POWER8E processors, where
3089 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3090 * performance monitor interrupt. Instead, when we need to have
3091 * an interrupt pending, we have to arrange for a counter to overflow.
3092 */
3093kvmppc_fix_pmao:
3094 li r3, 0
3095 mtspr SPRN_MMCR2, r3
3096 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3097 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3098 mtspr SPRN_MMCR0, r3
3099 lis r3, 0x7fff
3100 ori r3, r3, 0xffff
3101 mtspr SPRN_PMC6, r3
3102 isync
3103 blr
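The workaround never sets MMCR0[PMAO] directly; it arms PMC6 one count below the overflow threshold so that the next counted cycle raises the performance monitor interrupt. The same sequence in C using the kernel's SPR accessors (a sketch under that assumption, not the actual implementation):

	static void fix_pmao_sketch(void)
	{
		mtspr(SPRN_MMCR2, 0);
		mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE |
				  MMCR0_PMCjCE | MMCR0_C56RUN);
		/* 0x7fffffff + one event sets bit 31 -> pending PMI */
		mtspr(SPRN_PMC6, 0x7fffffff);
		isync();
	}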
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003104
3105#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3106/*
3107 * Start timing an activity
3108 * r3 = pointer to time accumulation struct, r4 = vcpu
3109 */
3110kvmhv_start_timing:
3111 ld r5, HSTATE_KVM_VCORE(r13)
3112 lbz r6, VCORE_IN_GUEST(r5)
3113 cmpwi r6, 0
3114 beq 5f /* if in guest, need to */
3115 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
31165: mftb r5
3117 subf r5, r6, r5
3118 std r3, VCPU_CUR_ACTIVITY(r4)
3119 std r5, VCPU_ACTIVITY_START(r4)
3120 blr
3121
3122/*
3123 * Accumulate time to one activity and start another.
3124 * r3 = pointer to new time accumulation struct, r4 = vcpu
3125 */
3126kvmhv_accumulate_time:
3127 ld r5, HSTATE_KVM_VCORE(r13)
3128 lbz r8, VCORE_IN_GUEST(r5)
3129 cmpwi r8, 0
3130 beq 4f /* if in guest, need to */
3131 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
31324: ld r5, VCPU_CUR_ACTIVITY(r4)
3133 ld r6, VCPU_ACTIVITY_START(r4)
3134 std r3, VCPU_CUR_ACTIVITY(r4)
3135 mftb r7
3136 subf r7, r8, r7
3137 std r7, VCPU_ACTIVITY_START(r4)
3138 cmpdi r5, 0
3139 beqlr
3140 subf r3, r6, r7
3141 ld r8, TAS_SEQCOUNT(r5)
3142 cmpdi r8, 0
3143 addi r8, r8, 1
3144 std r8, TAS_SEQCOUNT(r5)
3145 lwsync
3146 ld r7, TAS_TOTAL(r5)
3147 add r7, r7, r3
3148 std r7, TAS_TOTAL(r5)
3149 ld r6, TAS_MIN(r5)
3150 ld r7, TAS_MAX(r5)
3151 beq 3f
3152 cmpd r3, r6
3153 bge 1f
31543: std r3, TAS_MIN(r5)
31551: cmpd r3, r7
3156 ble 2f
3157 std r3, TAS_MAX(r5)
31582: lwsync
3159 addi r8, r8, 1
3160 std r8, TAS_SEQCOUNT(r5)
3161 blr
3162#endif
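kvmhv_accumulate_time is the writer side of a seqcount: it bumps TAS_SEQCOUNT to an odd value, updates the total/min/max fields between lwsync barriers, then bumps it back to even. A matching reader sketch (the layout mirrors struct kvmhv_tb_accumulator; the helper name is invented):

	struct tb_acc {
		u64 seqcount;
		u64 tb_total;
		u64 tb_min;
		u64 tb_max;
	};

	static struct tb_acc read_tb_acc_sketch(struct tb_acc *acc)
	{
		struct tb_acc snap;
		u64 seq;

		do {
			seq = READ_ONCE(acc->seqcount);
			smp_rmb();	/* pairs with the writer's lwsync */
			snap = *acc;
			smp_rmb();
		} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);
		return snap;
	}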