/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
BEGIN_FTR_SECTION; \
	extsw reg, reg; \
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
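
/*
 * Pre-POWER9 CPUs have a 32-bit HDEC which mfspr zero-extends into
 * the 64-bit GPR, so it must be sign-extended before being compared
 * against zero.  POWER9 (arch v3.00) has the wide hypervisor
 * decrementer and needs no extension.
 */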

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
#define STACK_SLOT_IAMR (SFS-40)
#define STACK_SLOT_CIABR (SFS-48)
#define STACK_SLOT_DAWR (SFS-56)
#define STACK_SLOT_DAWRX (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
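
/*
 * These slots live in the SFS-byte stack frame created by
 * kvmppc_hv_entry (stdu r1, -SFS(r1)); they are allocated downward
 * from the top of the frame, leaving the standard frame header at
 * the bottom.
 */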

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -112(r1)
	mfmsr r10
	std r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li r0,MSR_RI
	andc r0,r10,r0
	li r6,MSR_IR | MSR_DR
	andc r6,r10,r6
	mtmsrd r0,1 /* clear RI in MSR */
	mtsrr0 r5
	mtsrr1 r6
	RFI
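	/*
	 * The RFI above "returns" to kvmppc_call_hv_entry with
	 * SRR1 = MSR & ~(MSR_IR | MSR_DR), i.e. it switches the MMU
	 * off and enters real mode in one instruction.
	 */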

kvmppc_call_hv_entry:
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld r5,HSTATE_DABR(r13)
	li r6,7
	mtspr SPRN_DABR,r5
	mtspr SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld r3,PACA_SPRG_VDSO(r13)
	mtspr SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
	lbz r4, LPPACA_PMCINUSE(r3)
	cmpwi r4, 0
	beq 23f /* skip if not */
BEGIN_FTR_SECTION
	ld r3, HSTATE_MMCR0(r13)
	andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi r4, MMCR0_PMAO
	beql kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, HSTATE_PMC1(r13)
	lwz r4, HSTATE_PMC2(r13)
	lwz r5, HSTATE_PMC3(r13)
	lwz r6, HSTATE_PMC4(r13)
	lwz r8, HSTATE_PMC5(r13)
	lwz r9, HSTATE_PMC6(r13)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r4
	mtspr SPRN_PMC3, r5
	mtspr SPRN_PMC4, r6
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
	ld r3, HSTATE_MMCR0(r13)
	ld r4, HSTATE_MMCR1(r13)
	ld r5, HSTATE_MMCRA(r13)
	ld r6, HSTATE_SIAR(r13)
	ld r7, HSTATE_SDAR(r13)
	mtspr SPRN_MMCR1, r4
	mtspr SPRN_MMCRA, r5
	mtspr SPRN_SIAR, r6
	mtspr SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld r8, HSTATE_MMCR2(r13)
	ld r9, HSTATE_SIER(r13)
	mtspr SPRN_MMCR2, r8
	mtspr SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld r3, HSTATE_DECEXP(r13)
	mftb r4
	subf r4, r4, r3
	mtspr SPRN_DEC, r4
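	/* i.e. DEC = host_dec_expiry - current_timebase */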

	/* hwthread_req may have been set by the cede or no-vcpu paths, so clear it */
	li r0, 0
	stb r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld r8, 112+PPC_LR_STKOFF(r1)
	addi r1, r1, 112
	ld r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr r0
	andi. r0, r0, MSR_IR /* in real mode? */
	bne .Lvirt_return

	/* RFI into the highmem handler */
	mfmsr r6
	li r0, MSR_RI
	andc r6, r6, r0
	mtmsrd r6, 1 /* Clear RI in MSR */
	mtsrr0 r8
	mtsrr1 r7
	RFI

	/* Virtual-mode return */
.Lvirt_return:
	mtlr r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr r3, SPRN_HDEC
	mtspr SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld r5, HSTATE_KVM_VCORE(r13)
65:	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	beq 65b
	/* Set LPCR. */
	ld r8,VCORE_LPCR(r5)
	mtspr SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r7, HSTATE_PTID(r13)
	li r0, 1
	sld r0, r0, r7
	addi r6, r5, VCORE_NAPPING_THREADS
1:	lwarx r3, 0, r6
	or r3, r3, r0
	stwcx. r3, 0, r6
	bne 1b
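	/*
	 * The lwarx/stwcx. loop above is roughly the following C
	 * (illustrative sketch only):
	 *
	 *	do {
	 *		old = vcore->napping_threads;
	 *	} while (!cmpxchg(&vcore->napping_threads, old,
	 *			  old | (1 << ptid)));
	 */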
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li r12, 0
	lwz r7, VCORE_ENTRY_EXIT(r5)
	cmpwi r7, 0x100
	bge kvm_novcpu_exit /* another thread already exiting */
	li r3, NAPPING_NOVCPU
	stb r3, HSTATE_NAPPING(r13)

	li r3, 0 /* Don't wake on privileged (OS) doorbell */
	b kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld r1, HSTATE_HOST_R1(r13)
	ld r5, HSTATE_KVM_VCORE(r13)
	li r0, 0
	stb r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz r0, VCORE_ENTRY_EXIT(r5)
	cmpwi r0, 0x100
	bge kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz r7, HSTATE_PTID(r13)
	li r0, 1
	sld r0, r0, r7
	addi r6, r5, VCORE_NAPPING_THREADS
4:	lwarx r7, 0, r6
	andc r7, r7, r0
	stwcx. r7, 0, r6
	bne 4b

	/* See if the wake reason means we need to exit */
	cmpdi r3, 0
	bge kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi r0, 0
	blt kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld r4, HSTATE_KVM_VCPU(r13)
	cmpdi r4, 0
	beq kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing
#endif
	b kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld r4, HSTATE_KVM_VCPU(r13)
	cmpdi r4, 0
	beq 13f
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
13:	mr r3, r12
	stw r12, STACK_SLOT_TRAP(r1)
	bl kvmhv_commence_exit
	nop
	lwz r12, STACK_SLOT_TRAP(r1)
	b kvmhv_switch_to_host

/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr r0, SPRN_CTRLF
	ori r0, r0, 1
	mtspr SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr SPRN_SRR1,r3

	ld r2,PACATOC(r13)

	li r0,KVM_HWTHREAD_IN_KVM
	stb r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li r0,1
	stb r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz r0,HSTATE_NAPPING(r13)
	cmpwi r0,NAPPING_CEDE
	beq kvm_end_cede
	cmpwi r0,NAPPING_NOVCPU
	beq kvm_novcpu_wakeup

	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi r3, 0
	bge kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld r5,HSTATE_KVM_VCORE(r13)
	cmpdi r5,0
	/* if we have no vcore to run, go back to sleep */
	beq kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld r6, PACA_DSCR_DEFAULT(r13)
	std r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz r4, HSTATE_PTID(r13)
	cmpwi r4, 0
	bne 63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld r6, 0(r6)
	mtspr SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld r6, HSTATE_SPLIT_MODE(r13)
	cmpdi r6, 0
	beq 63f
	ld r0, KVM_SPLIT_RPR(r6)
	mtspr SPRN_RPR, r0
	ld r0, KVM_SPLIT_PMMAR(r6)
	mtspr SPRN_PMMAR, r0
	ld r0, KVM_SPLIT_LDBAR(r6)
	mtspr SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li r0, 0
	std r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync ensures that
	 * it is.
	 */
	lwsync
	std r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	bne kvm_no_guest

	li r3,0 /* NULL argument */
	bl hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi r3, 0
	bne 53f
	HMT_MEDIUM
	li r0, KVM_HWTHREAD_IN_KERNEL
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi r3, 0
	bne 54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop. The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li r3, LPCR_PECE0
	mfspr r4, SPRN_LPCR
	rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr SPRN_LPCR, r4
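	/* i.e. allow wakeup on external interrupts (PECE0) only; the
	 * decrementer wakeup bit (PECE1) is cleared */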
	li r3, 0
	mfspr r12,SPRN_SRR1
	b pnv_wakeup_loss

53:	HMT_LOW
	ld r5, HSTATE_KVM_VCORE(r13)
	cmpdi r5, 0
	bne 60f
	ld r3, HSTATE_SPLIT_MODE(r13)
	cmpdi r3, 0
	beq kvm_no_guest
	lbz r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi r0, 0
	beq kvm_no_guest
	HMT_MEDIUM
	b kvm_unsplit_nap
60:	HMT_MEDIUM
	b kvm_secondary_got_guest

54:	li r0, KVM_HWTHREAD_IN_KVM
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	b kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest. The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them. This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	bne 55f
	li r3, 0 /* NULL argument */
	bl hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync /* matches smp_mb() before setting split_info.do_nap */
	ld r0, HSTATE_KVM_VCORE(r13)
	cmpdi r0, 0
	bne kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld r3, HSTATE_SPLIT_MODE(r13)
	li r0, 1
	lhz r4, PACAPACAINDEX(r13)
	clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
	addi r4, r4, KVM_SPLIT_NAPPED
	stbx r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi r0, 0
	beq 57f
	li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr r5, SPRN_LPCR
	rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b kvm_nap_sequence

57:	li r0, 0
	stbx r0, r3, r4
	b kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -SFS(r1)

	/* Save R1 in the PACA */
	std r1, HSTATE_HOST_R1(r13)

	li r6, KVM_GUEST_MODE_HOST_HV
	stb r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi r4, 0
	beq 1f
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
	lbz r0, KVM_RADIX(r9)
	cmpwi cr7, r0, 0

	/* Clear out SLB if hash */
	bne cr7, 2f
	li r6,0
	slbmte r6,r6
	slbia
	ptesync
2:
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li r7, 1
	lbz r6, HSTATE_PTID(r13)
	sld r7, r7, r6
	addi r8, r5, VCORE_ENTRY_EXIT
21:	lwarx r3, 0, r8
	cmpwi r3, 0x100 /* any threads starting to exit? */
	bge secondary_too_late /* if so we're too late to the party */
	or r3, r3, r7
	stwcx. r3, 0, r8
	bne 21b
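	/*
	 * VCORE_ENTRY_EXIT keeps the entry map in its low byte and the
	 * exit map above it, so the loop above is roughly (sketch):
	 *
	 *	do {
	 *		old = vcore->entry_exit_map;
	 *		if (old >= 0x100)	// some thread began exiting
	 *			goto secondary_too_late;
	 *	} while (!cmpxchg(&vcore->entry_exit_map, old,
	 *			  old | (1 << ptid)));
	 */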

	/* Primary thread switches to guest partition. */
	cmpwi r6,0
	bne 10f
	lwz r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld r6,KVM_SDR1(r9)
	li r0,LPID_RSVD /* switch to reserved LPID */
	mtspr SPRN_LPID,r0
	ptesync
	mtspr SPRN_SDR1,r6 /* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi r7,r6,64-6 /* extract bit number (6 bits) */
	srdi r6,r6,6 /* doubleword number */
	sldi r6,r6,3 /* address offset */
	add r6,r6,r9
	addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
	li r8,1
	sld r8,r8,r7
	ld r7,0(r6)
	and. r7,r7,r8
	beq 22f
	/* Flush the TLB of any entries for this LPID */
	lwz r0,KVM_TLB_SETS(r9)
	mtctr r0
	li r7,0x800 /* IS field = 0b10 */
	ptesync
	li r0,0 /* RS for P9 version of tlbiel */
	bne cr7, 29f
28:	tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi r7,r7,0x1000
	bdnz 28b
	b 30f
29:	PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */
	addi r7,r7,0x1000
	bdnz 29b
30:	ptesync
23:	ldarx r7,0,r6 /* clear the bit after TLB flushed */
	andc r7,r7,r8
	stdcx. r7,0,r6
	bne 23b

	/* Add timebase offset onto timebase */
22:	ld r8,VCORE_TB_OFFSET(r5)
	cmpdi r8,0
	beq 37f
	mftb r6 /* current host timebase */
	add r8,r8,r6
	mtspr SPRN_TBU40,r8 /* update upper 40 bits */
	mftb r7 /* check if lower 24 bits overflowed */
	clrldi r6,r6,40
	clrldi r7,r7,40
	cmpld r7,r6
	bge 37f
	addis r8,r8,0x100 /* if so, increment upper 40 bits */
	mtspr SPRN_TBU40,r8
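	/*
	 * mtspr TBU40 writes only the upper 40 bits of the timebase.
	 * If the lower 24 bits carried into bit 40 between the mftb
	 * and the mtspr, the comparison above detects the lost carry
	 * and adds it back.
	 */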

	/* Load guest PCR value to select appropriate compat mode */
37:	ld r7, VCORE_PCR(r5)
	cmpdi r7, 0
	beq 38f
	mtspr SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld r8, VCORE_DPDES(r5)
	ld r7, VCORE_VTB(r5)
	mtspr SPRN_DPDES, r8
	mtspr SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl kvmppc_subcore_enter_guest
	nop
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r4, HSTATE_KVM_VCPU(r13)
	li r0,1
	stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi r4, 0
	beq kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz r5,VCPU_SLB_MAX(r4)
	cmpwi r5,0
	beq 9f
	mtctr r5
	addi r6,r4,VCPU_SLB
1:	ld r8,VCPU_SLB_E(r6)
	ld r9,VCPU_SLB_V(r6)
	slbmte r9,r8
	addi r6,r6,VCPU_SLB_SIZE
	bdnz 1b
9:
	/* Increment yield count if they have a VPA */
	ld r3, VCPU_VPA(r4)
	cmpdi r3, 0
	beq 25f
	li r6, LPPACA_YIELDCOUNT
	LWZX_BE r5, r3, r6
	addi r5, r5, 1
	STWX_BE r5, r3, r6
	li r6, 1
	stb r6, VCPU_VPA_DIRTY(r4)
25:
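	/*
	 * Roughly (illustrative sketch; LWZX_BE/STWX_BE do a
	 * byte-swapped access on little-endian hosts):
	 *
	 *	if (vpa) {
	 *		vpa->yield_count++;	// big-endian field
	 *		vcpu->arch.vpa.dirty = 1;
	 *	}
	 */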

	/* Save purr/spurr */
	mfspr r5,SPRN_PURR
	mfspr r6,SPRN_SPURR
	std r5,HSTATE_PURR(r13)
	std r6,HSTATE_SPURR(r13)
	ld r7,VCPU_PURR(r4)
	ld r8,VCPU_SPURR(r4)
	mtspr SPRN_PURR,r7
	mtspr SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr r5, SPRN_TIDR
	mfspr r6, SPRN_PSSCR
	mfspr r7, SPRN_PID
	mfspr r8, SPRN_IAMR
	std r5, STACK_SLOT_TID(r1)
	std r6, STACK_SLOT_PSSCR(r1)
	std r7, STACK_SLOT_PID(r1)
	std r8, STACK_SLOT_IAMR(r1)
	mfspr r5, SPRN_HFSCR
	std r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr r5, SPRN_CIABR
	mfspr r6, SPRN_DAWR
	mfspr r7, SPRN_DAWRX
	std r5, STACK_SLOT_CIABR(r1)
	std r6, STACK_SLOT_DAWR(r1)
	std r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz r5,VCPU_DABRX(r4)
	ld r6,VCPU_DABR(r4)
	mtspr SPRN_DABRX,r5
	mtspr SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li r3, 1
	sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
	mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld r3, VCPU_MMCR(r4)
	andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi r5, MMCR0_PMAO
	beql kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
	lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
	lwz r6, VCPU_PMC + 8(r4)
	lwz r7, VCPU_PMC + 12(r4)
	lwz r8, VCPU_PMC + 16(r4)
	lwz r9, VCPU_PMC + 20(r4)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r5
	mtspr SPRN_PMC3, r6
	mtspr SPRN_PMC4, r7
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
	ld r3, VCPU_MMCR(r4)
	ld r5, VCPU_MMCR + 8(r4)
	ld r6, VCPU_MMCR + 16(r4)
	ld r7, VCPU_SIAR(r4)
	ld r8, VCPU_SDAR(r4)
	mtspr SPRN_MMCR1, r5
	mtspr SPRN_MMCRA, r6
	mtspr SPRN_SIAR, r7
	mtspr SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld r5, VCPU_MMCR + 24(r4)
	ld r6, VCPU_SIER(r4)
	mtspr SPRN_MMCR2, r5
	mtspr SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz r7, VCPU_PMC + 24(r4)
	lwz r8, VCPU_PMC + 28(r4)
	ld r9, VCPU_MMCR + 32(r4)
	mtspr SPRN_SPMC1, r7
	mtspr SPRN_SPMC2, r8
	mtspr SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl kvmppc_load_fp

	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld r5, VCPU_DSCR(r4)
	mtspr SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld r5, VCPU_IAMR(r4)
	lwz r6, VCPU_PSPB(r4)
	ld r7, VCPU_FSCR(r4)
	mtspr SPRN_IAMR, r5
	mtspr SPRN_PSPB, r6
	mtspr SPRN_FSCR, r7
	ld r5, VCPU_DAWR(r4)
	ld r6, VCPU_DAWRX(r4)
	ld r7, VCPU_CIABR(r4)
	ld r8, VCPU_TAR(r4)
	mtspr SPRN_DAWR, r5
	mtspr SPRN_DAWRX, r6
	mtspr SPRN_CIABR, r7
	mtspr SPRN_TAR, r8
	ld r5, VCPU_IC(r4)
	ld r8, VCPU_EBBHR(r4)
	mtspr SPRN_IC, r5
	mtspr SPRN_EBBHR, r8
	ld r5, VCPU_EBBRR(r4)
	ld r6, VCPU_BESCR(r4)
	lwz r7, VCPU_GUEST_PID(r4)
	ld r8, VCPU_WORT(r4)
	mtspr SPRN_EBBRR, r5
	mtspr SPRN_BESCR, r6
	mtspr SPRN_PID, r7
	mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld r5, VCPU_TCSCR(r4)
	ld r6, VCPU_ACOP(r4)
	ld r7, VCPU_CSIGR(r4)
	ld r8, VCPU_TACR(r4)
	mtspr SPRN_TCSCR, r5
	mtspr SPRN_ACOP, r6
	mtspr SPRN_CSIGR, r7
	mtspr SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld r5, VCPU_TID(r4)
	ld r6, VCPU_PSSCR(r4)
	oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
	ld r7, VCPU_HFSCR(r4)
	mtspr SPRN_TIDR, r5
	mtspr SPRN_PSSCR, r6
	mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld r5,HSTATE_KVM_VCORE(r13)
	ld r6,VCORE_TB_OFFSET(r5)
	add r8,r8,r6
	mftb r7
	subf r3,r7,r8
	mtspr SPRN_DEC,r3
	std r3,VCPU_DEC(r4)
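	/* i.e. guest DEC = (dec_expires + vcore->tb_offset) - current_tb */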

	ld r5, VCPU_SPRG0(r4)
	ld r6, VCPU_SPRG1(r4)
	ld r7, VCPU_SPRG2(r4)
	ld r8, VCPU_SPRG3(r4)
	mtspr SPRN_SPRG0, r5
	mtspr SPRN_SPRG1, r6
	mtspr SPRN_SPRG2, r7
	mtspr SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld r5, VCPU_DAR(r4)
	lwz r6, VCPU_DSISR(r4)
	mtspr SPRN_DAR, r5
	mtspr SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld r5,VCPU_AMR(r4)
	ld r6,VCPU_UAMOR(r4)
	li r7,-1
	mtspr SPRN_AMR,r5
	mtspr SPRN_UAMOR,r6
	mtspr SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz r5,VCPU_CTRL(r4)
	andi. r5,r5,1
	bne 4f
	mfspr r6,SPRN_CTRLF
	clrrdi r6,r6,1
	mtspr SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r6, HSTATE_PTID(r13)
	cmpwi r6, 0
	beq 21f
	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	bne 21f
	HMT_LOW
20:	lwz r3, VCORE_ENTRY_EXIT(r5)
	cmpwi r3, 0x100
	bge no_switch_exit
	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	beq 20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld r8,VCORE_LPCR(r5)
	mtspr SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi r3, 512 /* 1 microsecond */
	blt hdec_soon

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi cr0, r10, r0
	beq no_xive
	ld r11, VCPU_XIVE_SAVED_STATE(r4)
	li r9, TM_QW1_OS
	stdcix r11,r9,r10
	eieio
	lwz r11, VCPU_XIVE_CAM_WORD(r4)
	li r9, TM_QW1_OS + TM_WORD2
	stwcix r11,r9,r10
	li r9, 1
	stw r9, VCPU_XIVE_PUSHED(r4)
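	/*
	 * stdcix/stwcix are cache-inhibited stores, used here because
	 * the XIVE thread management area is MMIO and we are addressing
	 * it through its real-mode (physical) mapping.
	 */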
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld r6, VCPU_CTR(r4)
	ld r7, VCPU_XER(r4)

	mtctr r6
	mtxer r7

kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
	ld r10, VCPU_PC(r4)
	ld r11, VCPU_MSR(r4)
	ld r6, VCPU_SRR0(r4)
	ld r7, VCPU_SRR1(r4)
	mtspr SPRN_SRR0, r6
	mtspr SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl r11, r11, 63 - MSR_HV_LG, 1
	rotldi r11, r11, 1 + MSR_HV_LG
	ori r11, r11, MSR_ME
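	/*
	 * The rldicl/rotldi pair rotates the HV bit to the top, masks
	 * it off, and rotates back: a two-instruction way of clearing
	 * just MSR_HV while leaving the other MSR bits intact.
	 */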

	/* Check if we can deliver an external or decrementer interrupt now */
	ld r0, VCPU_PENDING_EXC(r4)
	rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi cr1, r0, 0
	andi. r8, r11, MSR_EE
	mfspr r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr SPRN_LPCR, r8
	isync
	beq 5f
	li r0, BOOK3S_INTERRUPT_EXTERNAL
	bne cr1, 12f
	mfspr r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis. r8, r8, LPCR_LD@h
	bne 15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw r0, r0
15:	cmpdi r0, 0
	li r0, BOOK3S_INTERRUPT_DECREMENTER
	bge 5f

12:	mtspr SPRN_SRR0, r10
	mr r10,r0
	mtspr SPRN_SRR1, r11
	mr r9, r4
	bl kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz r0, VCPU_DBELL_REQ(r4)
	cmpwi r0, 0
	beq fast_guest_return
	ld r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li r0, 1
	mtspr SPRN_DPDES, r0
	std r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li r0, 0
	stb r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li r0,0
	stb r0,VCPU_CEDED(r4) /* cancel cede */
	mtspr SPRN_HSRR0,r10
	mtspr SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li r9, KVM_GUEST_MODE_GUEST_HV
	stb r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi r3, r4, VCPU_TB_GUEST
	bl kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld r5, VCPU_CFAR(r4)
	mtspr SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld r5, VCPU_LR(r4)
	lwz r6, VCPU_CR(r4)
	mtlr r5
	mtcr r6

	ld r1, VCPU_GPR(R1)(r4)
	ld r2, VCPU_GPR(R2)(r4)
	ld r3, VCPU_GPR(R3)(r4)
	ld r5, VCPU_GPR(R5)(r4)
	ld r6, VCPU_GPR(R6)(r4)
	ld r7, VCPU_GPR(R7)(r4)
	ld r8, VCPU_GPR(R8)(r4)
	ld r9, VCPU_GPR(R9)(r4)
	ld r10, VCPU_GPR(R10)(r4)
	ld r11, VCPU_GPR(R11)(r4)
	ld r12, VCPU_GPR(R12)(r4)
	ld r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld r0, VCPU_GPR(R0)(r4)
	ld r4, VCPU_GPR(R4)(r4)

	hrfid
	b .
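	/*
	 * hrfid does not fall through; the branch-to-self merely stops
	 * speculative execution past it.
	 */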

secondary_too_late:
	li r12, 0
	cmpdi r4, 0
	beq 11f
	stw r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
11:	b kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li r12, 0
	b 12f
hdec_soon:
	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw r12, VCPU_TRAP(r4)
	mr r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
	b guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12 = (guest CR << 32) | interrupt vector
	 * R13 = PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std r9, HSTATE_SCRATCH2(r13)
	lbz r9, HSTATE_IN_GUEST(r13)
	cmpwi r9, KVM_GUEST_MODE_HOST_HV
	beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi r9, KVM_GUEST_MODE_GUEST
	ld r9, HSTATE_SCRATCH2(r13)
	beq kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li r9, KVM_GUEST_MODE_HOST_HV
	stb r9, HSTATE_IN_GUEST(r13)

	ld r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std r0, VCPU_GPR(R0)(r9)
	std r1, VCPU_GPR(R1)(r9)
	std r2, VCPU_GPR(R2)(r9)
	std r3, VCPU_GPR(R3)(r9)
	std r4, VCPU_GPR(R4)(r9)
	std r5, VCPU_GPR(R5)(r9)
	std r6, VCPU_GPR(R6)(r9)
	std r7, VCPU_GPR(R7)(r9)
	std r8, VCPU_GPR(R8)(r9)
	ld r0, HSTATE_SCRATCH2(r13)
	std r0, VCPU_GPR(R9)(r9)
	std r10, VCPU_GPR(R10)(r9)
	std r11, VCPU_GPR(R11)(r9)
	ld r3, HSTATE_SCRATCH0(r13)
	std r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi r4, r12, 32
	stw r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld r3, HSTATE_CFAR(r13)
	std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld r4, HSTATE_PPR(r13)
	std r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATOC(r13)

	mfspr r10, SPRN_SRR0
	mfspr r11, SPRN_SRR1
	std r10, VCPU_SRR0(r9)
	std r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi r12, r12, 32
	andi. r0, r12, 2 /* need to read HSRR0/1? */
	beq 1f
	mfspr r10, SPRN_HSRR0
	mfspr r11, SPRN_HSRR1
	clrrdi r12, r12, 2
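	/*
	 * Trap numbers for interrupts delivered via HSRR0/1 are passed
	 * in with bit 1 set; clrrdi strips it off again.
	 */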
1:	std r10, VCPU_PC(r9)
	std r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr r4
	std r3, VCPU_GPR(R13)(r9)
	std r4, VCPU_LR(r9)

	stw r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li r0, MSR_RI
	mtmsrd r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r9, VCPU_TB_RMINTR
	mr r4, r9
	bl kvmhv_accumulate_time
	ld r5, VCPU_GPR(R5)(r9)
	ld r6, VCPU_GPR(R6)(r9)
	ld r7, VCPU_GPR(R7)(r9)
	ld r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li r3,KVM_INST_FETCH_FAILED
	stw r3,VCPU_LAST_INST(r9)
	cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne 11f
	mfspr r3,SPRN_HEIR
11:	stw r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld r3, HSTATE_SCRATCH1(r13)
	mtctr r3
#else
	mfctr r3
#endif
	mfxer r4
	std r3, VCPU_CTR(r9)
	std r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq kvmppc_hdsi
	cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne 2f
	mfspr r3,SPRN_HDEC
	cmpwi r3,0
	mr r4,r9
	bge fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
	beq hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne 3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz r0, HSTATE_HOST_IPI(r13)
	cmpwi r0, 0
	beq 4f
	b guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne 14f
	mfspr r3, SPRN_HFSCR
	std r3, VCPU_HFSCR(r9)
	b guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+ guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001337 * Before returning to guest, we check if any CPU is heading out
1338 * to the host and if so, we head out also. If no CPUs are heading
1339	 * out, we check the return values <= 0 below.
1340 *
1341 * Return to guest (r3 <= 0)
1342 * 0 No external interrupt is pending
1343 * -1 A guest wakeup IPI (which has now been cleared)
1344 * In either case, we return to guest to deliver any pending
1345 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001346 *
1347 * -2 A PCI passthrough external interrupt was handled
1348 * (interrupt was delivered directly to guest)
1349 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001350 */
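	/*
	 * In C terms, the dispatch below is roughly (a sketch, not the
	 * actual implementation):
	 *
	 *	if (ret > 1) {			// passthrough completion
	 *		vcpu->arch.trap = BOOK3S_INTERRUPT_HV_RM_HARD;
	 *		goto guest_exit_cont;
	 *	} else if (ret == 1)		// host must handle it
	 *		goto guest_exit_cont;
	 *	// ret <= 0: return to the guest
	 */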
1351
Suresh Warrierf7af5202016-08-19 15:35:52 +10001352 cmpdi r3, 1
1353 ble 1f
1354
1355 /* Return code = 2 */
1356 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1357 stw r12, VCPU_TRAP(r9)
1358 b guest_exit_cont
1359
13601: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001361 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001362 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001363
Suresh Warrier37f55d32016-08-19 15:35:46 +10001364 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110013654: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001366 lwz r0, VCORE_ENTRY_EXIT(r5)
1367 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001368 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001369 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001370
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001371guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001372#ifdef CONFIG_KVM_XICS
1373 /* We are exiting, pull the VP from the XIVE */
1374 lwz r0, VCPU_XIVE_PUSHED(r9)
1375 cmpwi cr0, r0, 0
1376 beq 1f
1377 li r7, TM_SPC_PULL_OS_CTX
1378 li r6, TM_QW1_OS
1379 mfmsr r0
1380 andi. r0, r0, MSR_IR /* in real mode? */
1381 beq 2f
1382 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1383 cmpldi cr0, r10, 0
1384 beq 1f
1385 /* First load to pull the context, we ignore the value */
1386 lwzx r11, r7, r10
1387 eieio
1388 /* Second load to recover the context state (Words 0 and 1) */
1389 ldx r11, r6, r10
1390 b 3f
13912: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1392 cmpldi cr0, r10, 0
1393 beq 1f
1394 /* First load to pull the context, we ignore the value */
1395 lwzcix r11, r7, r10
1396 eieio
1397 /* Second load to recover the context state (Words 0 and 1) */
1398 ldcix r11, r6, r10
13993: std r11, VCPU_XIVE_SAVED_STATE(r9)
1400 /* Fixup some of the state for the next load */
1401 li r10, 0
1402 li r0, 0xff
1403 stw r10, VCPU_XIVE_PUSHED(r9)
1404 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1405 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
14061:
1407#endif /* CONFIG_KVM_XICS */
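	/*
	 * Sketch of the pull sequence above (virtual-mode flavour; the
	 * load32/load64 accessor names are illustrative assumptions):
	 *
	 *	(void) load32(tima + TM_SPC_PULL_OS_CTX); // pull OS context
	 *	state = load64(tima + TM_QW1_OS);	  // snapshot W0/W1
	 *	vcpu->arch.xive_saved_state = state;
	 *
	 * The real-mode branch does the same thing with cache-inhibited
	 * loads (lwzcix/ldcix) on the physical TIMA address.
	 */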
Paul Mackerrasde56a942011-06-29 00:21:34 +00001408 /* Save more register state */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001409 mfdar r6
1410 mfdsisr r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001411 std r6, VCPU_DAR(r9)
1412 stw r7, VCPU_DSISR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001413 /* don't overwrite fault_dar/fault_dsisr if HDSI */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001414 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
Paul Mackerras6af27c82015-03-28 14:21:10 +11001415 beq mc_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001416 std r6, VCPU_FAULT_DAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001417 stw r7, VCPU_FAULT_DSISR(r9)
1418
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001419 /* See if it is a machine check */
1420 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1421 beq machine_check_realmode
1422mc_cont:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001423#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1424 addi r3, r9, VCPU_TB_RMEXIT
1425 mr r4, r9
1426 bl kvmhv_accumulate_time
1427#endif
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001428
Gautham R. Shenoy7e022e72015-05-21 13:57:04 +05301429 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001430 /* Increment exit count, poke other threads to exit */
1431 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001432 nop
1433 ld r9, HSTATE_KVM_VCPU(r13)
1434 lwz r12, VCPU_TRAP(r9)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001435
Paul Mackerrasec257162015-06-24 21:18:03 +10001436 /* Stop others sending VCPU interrupts to this physical CPU */
1437 li r0, -1
1438 stw r0, VCPU_CPU(r9)
1439 stw r0, VCPU_THREAD_CPU(r9)
1440
Paul Mackerrasde56a942011-06-29 00:21:34 +00001441 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001442 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001443 stw r6,VCPU_CTRL(r9)
1444 andi. r0,r6,1
1445 bne 4f
1446 ori r6,r6,1
1447 mtspr SPRN_CTRLT,r6
14484:
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001449 /* Check if we are running hash or radix and store it in cr2 */
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001450 ld r5, VCPU_KVM(r9)
1451 lbz r0, KVM_RADIX(r5)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001452 cmpwi cr2,r0,0
1453
1454 /* Read the guest SLB and save it away */
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001455 li r5, 0
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001456 bne cr2, 3f /* for radix, save 0 entries */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001457 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1458 mtctr r0
1459 li r6,0
1460 addi r7,r9,VCPU_SLB
Paul Mackerrasde56a942011-06-29 00:21:34 +000014611: slbmfee r8,r6
1462 andis. r0,r8,SLB_ESID_V@h
1463 beq 2f
1464 add r8,r8,r6 /* put index in */
1465 slbmfev r3,r6
1466 std r8,VCPU_SLB_E(r7)
1467 std r3,VCPU_SLB_V(r7)
1468 addi r7,r7,VCPU_SLB_SIZE
1469 addi r5,r5,1
14702: addi r6,r6,1
1471 bdnz 1b
Paul Mackerrasf4c51f82017-01-30 21:21:45 +110014723: stw r5,VCPU_SLB_MAX(r9)
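	/*
	 * The save loop above is approximately (C sketch; slbmfee()/
	 * slbmfev() stand for the instructions of the same name, and the
	 * field names are illustrative):
	 *
	 *	for (i = n = 0; i < vcpu->arch.slb_nr; i++) {
	 *		e = slbmfee(i);
	 *		if (e & SLB_ESID_V) {
	 *			vcpu->arch.slb[n].orige = e | i;
	 *			vcpu->arch.slb[n].origv = slbmfev(i);
	 *			n++;
	 *		}
	 *	}
	 *	vcpu->arch.slb_max = n;
	 */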
Paul Mackerrasde56a942011-06-29 00:21:34 +00001473
1474 /*
1475 * Save the guest PURR/SPURR
1476 */
1477 mfspr r5,SPRN_PURR
1478 mfspr r6,SPRN_SPURR
1479 ld r7,VCPU_PURR(r9)
1480 ld r8,VCPU_SPURR(r9)
1481 std r5,VCPU_PURR(r9)
1482 std r6,VCPU_SPURR(r9)
1483 subf r5,r7,r5
1484 subf r6,r8,r6
1485
1486 /*
1487 * Restore host PURR/SPURR and add guest times
1488 * so that the time in the guest gets accounted.
1489 */
1490 ld r3,HSTATE_PURR(r13)
1491 ld r4,HSTATE_SPURR(r13)
1492 add r3,r3,r5
1493 add r4,r4,r6
1494 mtspr SPRN_PURR,r3
1495 mtspr SPRN_SPURR,r4
1496
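	/*
	 * Accounting identity for the block above, with *_entry denoting
	 * the snapshots taken at guest entry:
	 *
	 *	host_purr  += purr_now  - purr_entry;
	 *	host_spurr += spurr_now - spurr_entry;
	 *
	 * so the time spent in the guest is charged into the host's
	 * PURR/SPURR totals.
	 */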
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001497 /* Save DEC */
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10001498 ld r3, HSTATE_KVM_VCORE(r13)
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001499 mfspr r5,SPRN_DEC
1500 mftb r6
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10001501 /* On P9, if the guest has large decr enabled, don't sign extend */
1502BEGIN_FTR_SECTION
1503 ld r4, VCORE_LPCR(r3)
1504 andis. r4, r4, LPCR_LD@h
1505 bne 16f
1506END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001507 extsw r5,r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000150816: add r5,r5,r6
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001509 /* r5 is a guest timebase value here, convert to host TB */
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001510 ld r4,VCORE_TB_OFFSET(r3)
1511 subf r5,r4,r5
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001512 std r5,VCPU_DEC_EXPIRES(r9)
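	/*
	 * Worked out, the expiry value stored above is
	 *
	 *	dec_expires = DEC + guest_tb - vcore->tb_offset
	 *
	 * i.e. the guest-timebase expiry converted to host timebase
	 * units (DEC is sign-extended first, except when the POWER9
	 * large decrementer is in use).
	 */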
1513
Michael Neulingb005255e2014-01-08 21:25:21 +11001514BEGIN_FTR_SECTION
1515 b 8f
1516END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001517 /* Save POWER8-specific registers */
1518 mfspr r5, SPRN_IAMR
1519 mfspr r6, SPRN_PSPB
1520 mfspr r7, SPRN_FSCR
1521 std r5, VCPU_IAMR(r9)
1522 stw r6, VCPU_PSPB(r9)
1523 std r7, VCPU_FSCR(r9)
1524 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001525 mfspr r7, SPRN_TAR
1526 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001527 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001528 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001529 std r8, VCPU_EBBHR(r9)
1530 mfspr r5, SPRN_EBBRR
1531 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001532 mfspr r7, SPRN_PID
1533 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001534 std r5, VCPU_EBBRR(r9)
1535 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001536 stw r7, VCPU_GUEST_PID(r9)
1537 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001538BEGIN_FTR_SECTION
1539 mfspr r5, SPRN_TCSCR
1540 mfspr r6, SPRN_ACOP
1541 mfspr r7, SPRN_CSIGR
1542 mfspr r8, SPRN_TACR
1543 std r5, VCPU_TCSCR(r9)
1544 std r6, VCPU_ACOP(r9)
1545 std r7, VCPU_CSIGR(r9)
1546 std r8, VCPU_TACR(r9)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001547FTR_SECTION_ELSE
1548 mfspr r5, SPRN_TIDR
1549 mfspr r6, SPRN_PSSCR
1550 std r5, VCPU_TID(r9)
1551 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1552 rotldi r6, r6, 60
1553 std r6, VCPU_PSSCR(r9)
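	/*
	 * The rldicl/rotldi pair above computes r6 &= PSSCR_GUEST_VIS:
	 * rotating left by 4 and clearing the upper 50 bits keeps PSSCR
	 * bits 0:3 (PLS) and 54:63, and rotating back by 60 restores
	 * their positions, leaving the mask 0xf0000000000003ff with the
	 * hypervisor-only fields zeroed.
	 */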
Paul Mackerras769377f2017-02-15 14:30:17 +11001554 /* Restore host HFSCR value */
1555 ld r7, STACK_SLOT_HFSCR(r1)
1556 mtspr SPRN_HFSCR, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001557ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001558 /*
1559 * Restore various registers to 0, where non-zero values
1560 * set by the guest could disrupt the host.
1561 */
1562 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001563 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001564 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001565BEGIN_FTR_SECTION
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001566 mtspr SPRN_IAMR, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001567 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001568 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1569 li r0, 1
1570 sldi r0, r0, 31
1571 mtspr SPRN_MMCRS, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001572END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Michael Neulingb005255e2014-01-08 21:25:21 +110015738:
1574
Paul Mackerrasde56a942011-06-29 00:21:34 +00001575 /* Save and reset AMR and UAMOR before turning on the MMU */
1576 mfspr r5,SPRN_AMR
1577 mfspr r6,SPRN_UAMOR
1578 std r5,VCPU_AMR(r9)
1579 std r6,VCPU_UAMOR(r9)
1580 li r6,0
1581 mtspr SPRN_AMR,r6
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001582 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001583
Paul Mackerrasde56a942011-06-29 00:21:34 +00001584 /* Switch DSCR back to host value */
1585 mfspr r8, SPRN_DSCR
1586 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001587 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001588 mtspr SPRN_DSCR, r7
1589
1590 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001591 std r14, VCPU_GPR(R14)(r9)
1592 std r15, VCPU_GPR(R15)(r9)
1593 std r16, VCPU_GPR(R16)(r9)
1594 std r17, VCPU_GPR(R17)(r9)
1595 std r18, VCPU_GPR(R18)(r9)
1596 std r19, VCPU_GPR(R19)(r9)
1597 std r20, VCPU_GPR(R20)(r9)
1598 std r21, VCPU_GPR(R21)(r9)
1599 std r22, VCPU_GPR(R22)(r9)
1600 std r23, VCPU_GPR(R23)(r9)
1601 std r24, VCPU_GPR(R24)(r9)
1602 std r25, VCPU_GPR(R25)(r9)
1603 std r26, VCPU_GPR(R26)(r9)
1604 std r27, VCPU_GPR(R27)(r9)
1605 std r28, VCPU_GPR(R28)(r9)
1606 std r29, VCPU_GPR(R29)(r9)
1607 std r30, VCPU_GPR(R30)(r9)
1608 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001609
1610 /* Save SPRGs */
1611 mfspr r3, SPRN_SPRG0
1612 mfspr r4, SPRN_SPRG1
1613 mfspr r5, SPRN_SPRG2
1614 mfspr r6, SPRN_SPRG3
1615 std r3, VCPU_SPRG0(r9)
1616 std r4, VCPU_SPRG1(r9)
1617 std r5, VCPU_SPRG2(r9)
1618 std r6, VCPU_SPRG3(r9)
1619
Paul Mackerras89436332012-03-02 01:38:23 +00001620 /* save FP state */
1621 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001622 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001623
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001624#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1625BEGIN_FTR_SECTION
Paul Mackerrasf024ee02016-06-22 14:21:59 +10001626 bl kvmppc_save_tm
1627END_FTR_SECTION_IFSET(CPU_FTR_TM)
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001628#endif
1629
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001630 /* Increment yield count if they have a VPA */
1631 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1632 cmpdi r8, 0
1633 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001634 li r4, LPPACA_YIELDCOUNT
1635 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001636 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001637 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001638 li r3, 1
1639 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000164025:
1641 /* Save PMU registers if requested */
1642 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001643BEGIN_FTR_SECTION
1644 /*
1645 * POWER8 seems to have a hardware bug where setting
1646 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1647 * when some counters are already negative doesn't seem
1648 * to cause a performance monitor alert (and hence interrupt).
1649 * The effect of this is that when saving the PMU state,
1650 * if there is no PMU alert pending when we read MMCR0
1651 * before freezing the counters, but one becomes pending
1652 * before we read the counters, we lose it.
1653 * To work around this, we need a way to freeze the counters
1654 * before reading MMCR0. Normally, freezing the counters
1655 * is done by writing MMCR0 (to set MMCR0[FC]) which
1656	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1657 * we can also freeze the counters using MMCR2, by writing
1658 * 1s to all the counter freeze condition bits (there are
1659 * 9 bits each for 6 counters).
1660 */
1661 li r3, -1 /* set all freeze bits */
1662 clrrdi r3, r3, 10
1663 mfspr r10, SPRN_MMCR2
1664 mtspr SPRN_MMCR2, r3
1665 isync
1666END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
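	/*
	 * Worked out: li r3, -1 followed by clrrdi r3, r3, 10 yields
	 * 0xfffffffffffffc00, i.e. ones in the upper 54 bits, which is
	 * exactly the 9 freeze condition bits for each of the 6 counters
	 * (9 * 6 = 54) in MMCR2.
	 */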
Paul Mackerrasde56a942011-06-29 00:21:34 +00001667 li r3, 1
1668 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1669 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1670 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001671 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001672 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001673 li r7, 0
1674 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001675 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001676 beq 21f /* if no VPA, save PMU stuff anyway */
1677 lbz r7, LPPACA_PMCINUSE(r8)
1678 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1679 bne 21f
1680 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1681 b 22f
168221: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001683 mfspr r7, SPRN_SIAR
1684 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001685 std r4, VCPU_MMCR(r9)
1686 std r5, VCPU_MMCR + 8(r9)
1687 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001688BEGIN_FTR_SECTION
1689 std r10, VCPU_MMCR + 24(r9)
1690END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001691 std r7, VCPU_SIAR(r9)
1692 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001693 mfspr r3, SPRN_PMC1
1694 mfspr r4, SPRN_PMC2
1695 mfspr r5, SPRN_PMC3
1696 mfspr r6, SPRN_PMC4
1697 mfspr r7, SPRN_PMC5
1698 mfspr r8, SPRN_PMC6
1699 stw r3, VCPU_PMC(r9)
1700 stw r4, VCPU_PMC + 4(r9)
1701 stw r5, VCPU_PMC + 8(r9)
1702 stw r6, VCPU_PMC + 12(r9)
1703 stw r7, VCPU_PMC + 16(r9)
1704 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001705BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001706 mfspr r5, SPRN_SIER
Paul Mackerras83677f52016-11-16 22:33:27 +11001707 std r5, VCPU_SIER(r9)
1708BEGIN_FTR_SECTION_NESTED(96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001709 mfspr r6, SPRN_SPMC1
1710 mfspr r7, SPRN_SPMC2
1711 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001712 stw r6, VCPU_PMC + 24(r9)
1713 stw r7, VCPU_PMC + 28(r9)
1714 std r8, VCPU_MMCR + 32(r9)
1715 lis r4, 0x8000
1716 mtspr SPRN_MMCRS, r4
Paul Mackerras83677f52016-11-16 22:33:27 +11001717END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001718END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000171922:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001720
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001721 /* Restore host values of some registers */
1722BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001723 ld r5, STACK_SLOT_CIABR(r1)
1724 ld r6, STACK_SLOT_DAWR(r1)
1725 ld r7, STACK_SLOT_DAWRX(r1)
1726 mtspr SPRN_CIABR, r5
1727 mtspr SPRN_DAWR, r6
1728 mtspr SPRN_DAWRX, r7
1729END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1730BEGIN_FTR_SECTION
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001731 ld r5, STACK_SLOT_TID(r1)
1732 ld r6, STACK_SLOT_PSSCR(r1)
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001733 ld r7, STACK_SLOT_PID(r1)
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001734 ld r8, STACK_SLOT_IAMR(r1)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001735 mtspr SPRN_TIDR, r5
1736 mtspr SPRN_PSSCR, r6
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001737 mtspr SPRN_PID, r7
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001738 mtspr SPRN_IAMR, r8
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001739END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001740
1741#ifdef CONFIG_PPC_RADIX_MMU
1742 /*
1743	 * Are we running hash or radix?
1744 */
1745 beq cr2,3f
1746
1747 /* Radix: Handle the case where the guest used an illegal PID */
1748 LOAD_REG_ADDR(r4, mmu_base_pid)
1749 lwz r3, VCPU_GUEST_PID(r9)
1750 lwz r5, 0(r4)
1751 cmpw cr0,r3,r5
1752 blt 2f
1753
1754 /*
1755	 * Illegal PID: the HW might have prefetched and cached in the TLB
1756 * some translations for the LPID 0 / guest PID combination which
1757 * Linux doesn't know about, so we need to flush that PID out of
1758 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1759 * the right context.
1760 */
1761 li r0,0
1762 mtspr SPRN_LPID,r0
1763 isync
1764
1765 /* Then do a congruence class local flush */
1766 ld r6,VCPU_KVM(r9)
1767 lwz r0,KVM_TLB_SETS(r6)
1768 mtctr r0
1769 li r7,0x400 /* IS field = 0b01 */
1770 ptesync
1771 sldi r0,r3,32 /* RS has PID */
17721: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1773 addi r7,r7,0x1000
1774 bdnz 1b
1775 ptesync
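	/*
	 * The flush loop above issues one tlbiel per TLB congruence
	 * class: r7 starts with IS = 0b01 (0x400) and steps by 0x1000 so
	 * that each iteration names the next set, r0 carries the guest
	 * PID in the RS position (r3 << 32), and RIC = 2 invalidates
	 * both the TLB entries and the page walk cache for that PID.
	 */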
1776
17772: /* Flush the ERAT on radix P9 DD1 guest exit */
Paul Mackerrasf11f6f72017-01-30 21:21:52 +11001778BEGIN_FTR_SECTION
1779 PPC_INVALIDATE_ERAT
1780END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001781 b 4f
1782#endif /* CONFIG_PPC_RADIX_MMU */
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001783
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001784 /* Hash: clear out SLB */
17853: li r5,0
1786 slbmte r5,r5
1787 slbia
1788 ptesync
17894:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001790 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001791 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001792 * We don't have to lock against tlbies but we do
1793 * have to coordinate the hardware threads.
1794 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001795kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001796 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001797 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001798 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1799 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001800 cmpwi r3,0
1801 beq 15f
1802 HMT_LOW
180313: lbz r3,VCORE_IN_GUEST(r5)
1804 cmpwi r3,0
1805 bne 13b
1806 HMT_MEDIUM
1807 b 16f
1808
1809 /* Primary thread waits for all the secondaries to exit guest */
181015: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001811 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001812 clrldi r3,r3,56
1813 cmpw r3,r0
1814 bne 15b
1815 isync
1816
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001817 /* Did we actually switch to the guest at all? */
1818 lbz r6, VCORE_IN_GUEST(r5)
1819 cmpwi r6, 0
1820 beq 19f
1821
Paul Mackerrasde56a942011-06-29 00:21:34 +00001822 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001823 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11001824BEGIN_FTR_SECTION
1825 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001826 li r8,LPID_RSVD /* switch to reserved LPID */
1827 mtspr SPRN_LPID,r8
1828 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11001829 mtspr SPRN_SDR1,r6 /* switch to host page table */
1830END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001831 mtspr SPRN_LPID,r7
1832 isync
1833
Michael Neulingb005255e2014-01-08 21:25:21 +11001834BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001835 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11001836 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001837 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11001838 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001839 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11001840 /* clear DPDES so we don't get guest doorbells in the host */
1841 li r8, 0
1842 mtspr SPRN_DPDES, r8
1843END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1844
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301845 /* If HMI, call kvmppc_realmode_hmi_handler() */
1846 cmpwi r12, BOOK3S_INTERRUPT_HMI
1847 bne 27f
1848 bl kvmppc_realmode_hmi_handler
1849 nop
1850 li r12, BOOK3S_INTERRUPT_HMI
1851 /*
1852	 * At this point kvmppc_realmode_hmi_handler has already resynchronized
1853	 * the TB, so there is no need to subtract the guest timebase offset
1854	 * from the timebase here; skip that step.
1855	 *
1856	 * Also, do not call kvmppc_subcore_exit_guest() here, because it has
1857	 * already been invoked from kvmppc_realmode_hmi_handler().
1858 */
1859 b 30f
1860
186127:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001862 /* Subtract timebase offset from timebase */
1863 ld r8,VCORE_TB_OFFSET(r5)
1864 cmpdi r8,0
1865 beq 17f
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001866 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001867 subf r8,r8,r6
1868 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1869 mftb r7 /* check if lower 24 bits overflowed */
1870 clrldi r6,r6,40
1871 clrldi r7,r7,40
1872 cmpld r7,r6
1873 bge 17f
1874 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1875 mtspr SPRN_TBU40,r8
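	/*
	 * TBU40 writes only the upper 40 bits of the timebase, so the
	 * check above handles a carry out of the low 24 bits: if the
	 * low 24 bits wrapped between the two mftb reads, addis
	 * r8,r8,0x100 adds 0x1000000 (2^24), i.e. one unit of the upper
	 * 40 bits, before rewriting TBU40.
	 */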
1876
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +0530187717: bl kvmppc_subcore_exit_guest
1878 nop
187930: ld r5,HSTATE_KVM_VCORE(r13)
1880 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1881
Paul Mackerrasde56a942011-06-29 00:21:34 +00001882 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301883 ld r0, VCORE_PCR(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001884 cmpdi r0, 0
1885 beq 18f
1886 li r0, 0
1887 mtspr SPRN_PCR, r0
188818:
1889 /* Signal secondary CPUs to continue */
1890 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000189119: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001892 mtspr SPRN_HDEC,r8
1893
189416: ld r8,KVM_HOST_LPCR(r4)
1895 mtspr SPRN_LPCR,r8
1896 isync
Paul Mackerrasde56a942011-06-29 00:21:34 +00001897
1898 /* load host SLB entries */
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001899BEGIN_MMU_FTR_SECTION
1900 b 0f
1901END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001902 ld r8,PACA_SLBSHADOWPTR(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001903
1904 .rept SLB_NUM_BOLTED
Alexander Graf0865a582014-06-11 10:36:17 +02001905 li r3, SLBSHADOW_SAVEAREA
1906 LDX_BE r5, r8, r3
1907 addi r3, r3, 8
1908 LDX_BE r6, r8, r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00001909 andis. r7,r5,SLB_ESID_V@h
1910 beq 1f
1911 slbmte r6,r5
19121: addi r8,r8,16
1913 .endr
Paul Mackerrasf4c51f82017-01-30 21:21:45 +110019140:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001915#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1916 /* Finish timing, if we have a vcpu */
1917 ld r4, HSTATE_KVM_VCPU(r13)
1918 cmpdi r4, 0
1919 li r3, 0
1920 beq 2f
1921 bl kvmhv_accumulate_time
19222:
1923#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00001924 /* Unset guest mode */
1925 li r0, KVM_GUEST_MODE_NONE
1926 stb r0, HSTATE_IN_GUEST(r13)
1927
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001928 ld r0, SFS+PPC_LR_STKOFF(r1)
1929 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10001930 mtlr r0
1931 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001932
Paul Mackerras697d3892011-12-12 12:36:37 +00001933/*
1934 * Check whether an HDSI is an HPTE not found fault or something else.
1935 * If it is an HPTE not found fault that is due to the guest accessing
1936 * a page that they have mapped but which we have paged out, then
1937 * we continue on with the guest exit path. In all other cases,
1938 * reflect the HDSI to the guest as a DSI.
1939 */
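/*
 * In outline, the decision flow below is (a sketch, not the actual
 * implementation):
 *
 *	if (radix)
 *		save DAR/DSISR/ASDR and exit to the host;
 *	if (!(dsisr & (NOHPTE | PROTFAULT)))
 *		synthesize a DSI for the guest;
 *	else find the VSID (ASDR, SLB or VRMA), call
 *	kvmppc_hpte_hv_fault() and act on its result: 0 = retry the
 *	instruction, -1 = exit to the host, -2 = fetch the instruction
 *	word for MMIO emulation, else synthesize a DSI with the
 *	returned DSISR bits.
 */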
1940kvmppc_hdsi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001941 ld r3, VCPU_KVM(r9)
1942 lbz r0, KVM_RADIX(r3)
1943 cmpwi r0, 0
Paul Mackerras697d3892011-12-12 12:36:37 +00001944 mfspr r4, SPRN_HDAR
1945 mfspr r6, SPRN_HDSISR
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001946 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
Paul Mackerras4cf302b2011-12-12 12:38:51 +00001947 /* HPTE not found fault or protection fault? */
1948 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00001949 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11001950 andi. r0, r11, MSR_DR /* data relocation enabled? */
1951 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11001952BEGIN_FTR_SECTION
1953 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
1954 b 4f
1955END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras697d3892011-12-12 12:36:37 +00001956 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001957 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11001958 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
1959 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000019604: std r4, VCPU_FAULT_DAR(r9)
1961 stw r6, VCPU_FAULT_DSISR(r9)
1962
1963 /* Search the hash table. */
1964 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001965 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001966 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00001967 ld r9, HSTATE_KVM_VCPU(r13)
1968 ld r10, VCPU_PC(r9)
1969 ld r11, VCPU_MSR(r9)
1970 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1971 cmpdi r3, 0 /* retry the instruction */
1972 beq 6f
1973 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001974 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001975 cmpdi r3, -2 /* MMIO emulation; need instr word */
1976 beq 2f
1977
Paul Mackerrascf29b212015-10-27 16:10:20 +11001978 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00001979 ld r4, VCPU_FAULT_DAR(r9)
1980 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110019811: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00001982 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110019837: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00001984 mtspr SPRN_SRR0, r10
1985 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11001986 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11001987 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001988fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000019896: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001990 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001991 mtctr r7
1992 mtxer r8
1993 mr r4, r9
1994 b fast_guest_return
1995
19963: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1997 ld r5, KVM_VRMA_SLB_V(r5)
1998 b 4b
1999
2000 /* If this is for emulated MMIO, load the instruction word */
20012: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2002
2003 /* Set guest mode to 'jump over instruction' so if lwz faults
2004 * we'll just continue at the next IP. */
2005 li r0, KVM_GUEST_MODE_SKIP
2006 stb r0, HSTATE_IN_GUEST(r13)
2007
2008 /* Do the access with MSR:DR enabled */
2009 mfmsr r3
2010 ori r4, r3, MSR_DR /* Enable paging for data */
2011 mtmsrd r4
2012 lwz r8, 0(r10)
2013 mtmsrd r3
2014
2015 /* Store the result */
2016 stw r8, VCPU_LAST_INST(r9)
2017
2018 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002019 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00002020 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002021 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00002022
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002023.Lradix_hdsi:
2024 std r4, VCPU_FAULT_DAR(r9)
2025 stw r6, VCPU_FAULT_DSISR(r9)
2026.Lradix_hisi:
2027 mfspr r5, SPRN_ASDR
2028 std r5, VCPU_FAULT_GPA(r9)
2029 b guest_exit_cont
2030
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002031/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002032 * Similarly for an HISI, reflect it to the guest as an ISI unless
2033 * it is an HPTE not found fault for a page that we have paged out.
2034 */
2035kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002036 ld r3, VCPU_KVM(r9)
2037 lbz r0, KVM_RADIX(r3)
2038 cmpwi r0, 0
2039 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002040 andis. r0, r11, SRR1_ISI_NOPT@h
2041 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002042 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2043 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002044BEGIN_FTR_SECTION
2045 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2046 b 4f
2047END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002048 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002049 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002050 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2051 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000020524:
2053 /* Search the hash table. */
2054 mr r3, r9 /* vcpu pointer */
2055 mr r4, r10
2056 mr r6, r11
2057 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002058 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002059 ld r9, HSTATE_KVM_VCPU(r13)
2060 ld r10, VCPU_PC(r9)
2061 ld r11, VCPU_MSR(r9)
2062 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2063 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002064 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002065 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002066 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002067
Paul Mackerrascf29b212015-10-27 16:10:20 +11002068 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002069 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110020701: li r0, BOOK3S_INTERRUPT_INST_STORAGE
20717: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002072 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002073 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002074 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002075 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002076
20773: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2078 ld r5, KVM_VRMA_SLB_V(r6)
2079 b 4b
2080
2081/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002082 * Try to handle an hcall in real mode.
2083 * Returns to the guest if we handle it, or continues on up to
2084 * the kernel if we can't (i.e. if we don't have a handler for
2085 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002086 *
2087 * r5 - r8 contain hcall args,
2088 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002089 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002090hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00002091 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002092 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08002093 /* sc 1 from userspace - reflect to guest syscall */
2094 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002095 clrrdi r3,r3,2
2096 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002097 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10002098 /* See if this hcall is enabled for in-kernel handling */
2099 ld r4, VCPU_KVM(r9)
2100 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2101 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2102 add r4, r4, r0
2103 ld r0, KVM_ENABLED_HCALLS(r4)
2104 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2105 srd r0, r0, r4
2106 andi. r0, r0, 1
2107 beq guest_exit_cont
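	/*
	 * Worked example for the enabled_hcalls test above: for H_ENTER
	 * (0x08), r3/4 = 2, so the word index (r3/4) >> 6 is 0 and the
	 * bit index (r3/4) & 0x3f is 2; the hcall is enabled iff bit 2
	 * of kvm->arch.enabled_hcalls[0] is set.
	 */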
2108 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002109 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10002110 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002111 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002112 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10002113 add r12,r3,r4
2114 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002115 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002116 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002117 bctrl
2118 cmpdi r3,H_TOO_HARD
2119 beq hcall_real_fallback
2120 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00002121 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002122 ld r10,VCPU_PC(r4)
2123 ld r11,VCPU_MSR(r4)
2124 b fast_guest_return
2125
Liu Ping Fan27025a62013-11-19 14:12:48 +08002126sc_1_fast_return:
2127 mtspr SPRN_SRR0,r10
2128 mtspr SPRN_SRR1,r11
2129 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11002130 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08002131 mr r4,r9
2132 b fast_guest_return
2133
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002134	/* We've attempted a real mode hcall, but the handler has punted it
2135	 * back to userspace. We need to restore some clobbered volatiles
2136	 * before resuming the pass-it-to-qemu path */
2137hcall_real_fallback:
2138 li r12,BOOK3S_INTERRUPT_SYSCALL
2139 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002140
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002141 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002142
2143 .globl hcall_real_table
2144hcall_real_table:
2145 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002146 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2147 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2148 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10002149 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2150 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002151 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2152 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002153 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002154 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002155 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002156 .long 0 /* 0x2c */
2157 .long 0 /* 0x30 */
2158 .long 0 /* 0x34 */
2159 .long 0 /* 0x38 */
2160 .long 0 /* 0x3c */
2161 .long 0 /* 0x40 */
2162 .long 0 /* 0x44 */
2163 .long 0 /* 0x48 */
2164 .long 0 /* 0x4c */
2165 .long 0 /* 0x50 */
2166 .long 0 /* 0x54 */
2167 .long 0 /* 0x58 */
2168 .long 0 /* 0x5c */
2169 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002170#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002171 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2172 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2173 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002174 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002175 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002176#else
2177 .long 0 /* 0x64 - H_EOI */
2178 .long 0 /* 0x68 - H_CPPR */
2179 .long 0 /* 0x6c - H_IPI */
2180 .long 0 /* 0x70 - H_IPOLL */
2181 .long 0 /* 0x74 - H_XIRR */
2182#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002183 .long 0 /* 0x78 */
2184 .long 0 /* 0x7c */
2185 .long 0 /* 0x80 */
2186 .long 0 /* 0x84 */
2187 .long 0 /* 0x88 */
2188 .long 0 /* 0x8c */
2189 .long 0 /* 0x90 */
2190 .long 0 /* 0x94 */
2191 .long 0 /* 0x98 */
2192 .long 0 /* 0x9c */
2193 .long 0 /* 0xa0 */
2194 .long 0 /* 0xa4 */
2195 .long 0 /* 0xa8 */
2196 .long 0 /* 0xac */
2197 .long 0 /* 0xb0 */
2198 .long 0 /* 0xb4 */
2199 .long 0 /* 0xb8 */
2200 .long 0 /* 0xbc */
2201 .long 0 /* 0xc0 */
2202 .long 0 /* 0xc4 */
2203 .long 0 /* 0xc8 */
2204 .long 0 /* 0xcc */
2205 .long 0 /* 0xd0 */
2206 .long 0 /* 0xd4 */
2207 .long 0 /* 0xd8 */
2208 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002209 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11002210 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002211 .long 0 /* 0xe8 */
2212 .long 0 /* 0xec */
2213 .long 0 /* 0xf0 */
2214 .long 0 /* 0xf4 */
2215 .long 0 /* 0xf8 */
2216 .long 0 /* 0xfc */
2217 .long 0 /* 0x100 */
2218 .long 0 /* 0x104 */
2219 .long 0 /* 0x108 */
2220 .long 0 /* 0x10c */
2221 .long 0 /* 0x110 */
2222 .long 0 /* 0x114 */
2223 .long 0 /* 0x118 */
2224 .long 0 /* 0x11c */
2225 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002226 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11002227 .long 0 /* 0x128 */
2228 .long 0 /* 0x12c */
2229 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002230 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002231 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11002232 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11002233 .long 0 /* 0x140 */
2234 .long 0 /* 0x144 */
2235 .long 0 /* 0x148 */
2236 .long 0 /* 0x14c */
2237 .long 0 /* 0x150 */
2238 .long 0 /* 0x154 */
2239 .long 0 /* 0x158 */
2240 .long 0 /* 0x15c */
2241 .long 0 /* 0x160 */
2242 .long 0 /* 0x164 */
2243 .long 0 /* 0x168 */
2244 .long 0 /* 0x16c */
2245 .long 0 /* 0x170 */
2246 .long 0 /* 0x174 */
2247 .long 0 /* 0x178 */
2248 .long 0 /* 0x17c */
2249 .long 0 /* 0x180 */
2250 .long 0 /* 0x184 */
2251 .long 0 /* 0x188 */
2252 .long 0 /* 0x18c */
2253 .long 0 /* 0x190 */
2254 .long 0 /* 0x194 */
2255 .long 0 /* 0x198 */
2256 .long 0 /* 0x19c */
2257 .long 0 /* 0x1a0 */
2258 .long 0 /* 0x1a4 */
2259 .long 0 /* 0x1a8 */
2260 .long 0 /* 0x1ac */
2261 .long 0 /* 0x1b0 */
2262 .long 0 /* 0x1b4 */
2263 .long 0 /* 0x1b8 */
2264 .long 0 /* 0x1bc */
2265 .long 0 /* 0x1c0 */
2266 .long 0 /* 0x1c4 */
2267 .long 0 /* 0x1c8 */
2268 .long 0 /* 0x1cc */
2269 .long 0 /* 0x1d0 */
2270 .long 0 /* 0x1d4 */
2271 .long 0 /* 0x1d8 */
2272 .long 0 /* 0x1dc */
2273 .long 0 /* 0x1e0 */
2274 .long 0 /* 0x1e4 */
2275 .long 0 /* 0x1e8 */
2276 .long 0 /* 0x1ec */
2277 .long 0 /* 0x1f0 */
2278 .long 0 /* 0x1f4 */
2279 .long 0 /* 0x1f8 */
2280 .long 0 /* 0x1fc */
2281 .long 0 /* 0x200 */
2282 .long 0 /* 0x204 */
2283 .long 0 /* 0x208 */
2284 .long 0 /* 0x20c */
2285 .long 0 /* 0x210 */
2286 .long 0 /* 0x214 */
2287 .long 0 /* 0x218 */
2288 .long 0 /* 0x21c */
2289 .long 0 /* 0x220 */
2290 .long 0 /* 0x224 */
2291 .long 0 /* 0x228 */
2292 .long 0 /* 0x22c */
2293 .long 0 /* 0x230 */
2294 .long 0 /* 0x234 */
2295 .long 0 /* 0x238 */
2296 .long 0 /* 0x23c */
2297 .long 0 /* 0x240 */
2298 .long 0 /* 0x244 */
2299 .long 0 /* 0x248 */
2300 .long 0 /* 0x24c */
2301 .long 0 /* 0x250 */
2302 .long 0 /* 0x254 */
2303 .long 0 /* 0x258 */
2304 .long 0 /* 0x25c */
2305 .long 0 /* 0x260 */
2306 .long 0 /* 0x264 */
2307 .long 0 /* 0x268 */
2308 .long 0 /* 0x26c */
2309 .long 0 /* 0x270 */
2310 .long 0 /* 0x274 */
2311 .long 0 /* 0x278 */
2312 .long 0 /* 0x27c */
2313 .long 0 /* 0x280 */
2314 .long 0 /* 0x284 */
2315 .long 0 /* 0x288 */
2316 .long 0 /* 0x28c */
2317 .long 0 /* 0x290 */
2318 .long 0 /* 0x294 */
2319 .long 0 /* 0x298 */
2320 .long 0 /* 0x29c */
2321 .long 0 /* 0x2a0 */
2322 .long 0 /* 0x2a4 */
2323 .long 0 /* 0x2a8 */
2324 .long 0 /* 0x2ac */
2325 .long 0 /* 0x2b0 */
2326 .long 0 /* 0x2b4 */
2327 .long 0 /* 0x2b8 */
2328 .long 0 /* 0x2bc */
2329 .long 0 /* 0x2c0 */
2330 .long 0 /* 0x2c4 */
2331 .long 0 /* 0x2c8 */
2332 .long 0 /* 0x2cc */
2333 .long 0 /* 0x2d0 */
2334 .long 0 /* 0x2d4 */
2335 .long 0 /* 0x2d8 */
2336 .long 0 /* 0x2dc */
2337 .long 0 /* 0x2e0 */
2338 .long 0 /* 0x2e4 */
2339 .long 0 /* 0x2e8 */
2340 .long 0 /* 0x2ec */
2341 .long 0 /* 0x2f0 */
2342 .long 0 /* 0x2f4 */
2343 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002344#ifdef CONFIG_KVM_XICS
2345 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2346#else
2347 .long 0 /* 0x2fc - H_XIRR_X*/
2348#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11002349 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002350 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002351hcall_real_table_end:
2352
Paul Mackerras8563bf52014-01-08 21:25:29 +11002353_GLOBAL(kvmppc_h_set_xdabr)
2354 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2355 beq 6f
2356 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2357 andc. r0, r5, r0
2358 beq 3f
23596: li r3, H_PARAMETER
2360 blr
2361
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002362_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002363 li r5, DABRX_USER | DABRX_KERNEL
23643:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002365BEGIN_FTR_SECTION
2366 b 2f
2367END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002368 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002369 stw r5, VCPU_DABRX(r3)
2370 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002371 /* Work around P7 bug where DABR can get corrupted on mtspr */
23721: mtspr SPRN_DABR,r4
2373 mfspr r5, SPRN_DABR
2374 cmpd r4, r5
2375 bne 1b
2376 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002377 li r3,0
2378 blr
2379
Paul Mackerras8563bf52014-01-08 21:25:29 +11002380 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
23812: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002382 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002383 clrrdi r4, r4, 3
2384 std r4, VCPU_DAWR(r3)
2385 std r5, VCPU_DAWRX(r3)
2386 mtspr SPRN_DAWR, r4
2387 mtspr SPRN_DAWRX, r5
2388 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002389 blr
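	/*
	 * Roughly, the emulation above maps the legacy DABR/DABRX pair
	 * onto DAWR/DAWRX: the DABR read/write enable bits are inserted
	 * as DAWRX[DR]/DAWRX[DW], the translate bit becomes DAWRX[WT],
	 * and clrrdi strips the low three DABR control bits so that r4
	 * holds a doubleword-aligned watch address for DAWR.
	 */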
2390
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002391_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002392 ori r11,r11,MSR_EE
2393 std r11,VCPU_MSR(r3)
2394 li r0,1
2395 stb r0,VCPU_CEDED(r3)
2396 sync /* order setting ceded vs. testing prodded */
2397 lbz r5,VCPU_PRODDED(r3)
2398 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002399 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002400 li r12,0 /* set trap to 0 to say hcall is handled */
2401 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002402 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002403 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002404
2405 /*
2406 * Set our bit in the bitmask of napping threads unless all the
2407 * other threads are already napping, in which case we send this
2408 * up to the host.
2409 */
2410 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002411 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002412 lwz r8,VCORE_ENTRY_EXIT(r5)
2413 clrldi r8,r8,56
2414 li r0,1
2415 sld r0,r0,r6
2416 addi r6,r5,VCORE_NAPPING_THREADS
241731: lwarx r4,0,r6
2418 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002419 cmpw r4,r8
2420 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002421 stwcx. r4,0,r6
2422 bne 31b
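	/*
	 * The lwarx/stwcx. loop above is an atomic update, roughly
	 * (names illustrative):
	 *
	 *	do {
	 *		new = vcore->napping_threads | (1 << ptid);
	 *		if (new == entry_map)	// every entered thread naps
	 *			goto kvm_cede_exit;
	 *	} while (!store_conditional(&vcore->napping_threads, new));
	 */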
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002423 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002424 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002425 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002426 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002427 lwz r7,VCORE_ENTRY_EXIT(r5)
2428 cmpwi r7,0x100
2429 bge 33f /* another thread already exiting */
2430
2431/*
2432 * Although not specifically required by the architecture, POWER7
2433 * preserves the following registers in nap mode, even if an SMT mode
2434 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2435 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2436 */
2437 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002438 std r14, VCPU_GPR(R14)(r3)
2439 std r15, VCPU_GPR(R15)(r3)
2440 std r16, VCPU_GPR(R16)(r3)
2441 std r17, VCPU_GPR(R17)(r3)
2442 std r18, VCPU_GPR(R18)(r3)
2443 std r19, VCPU_GPR(R19)(r3)
2444 std r20, VCPU_GPR(R20)(r3)
2445 std r21, VCPU_GPR(R21)(r3)
2446 std r22, VCPU_GPR(R22)(r3)
2447 std r23, VCPU_GPR(R23)(r3)
2448 std r24, VCPU_GPR(R24)(r3)
2449 std r25, VCPU_GPR(R25)(r3)
2450 std r26, VCPU_GPR(R26)(r3)
2451 std r27, VCPU_GPR(R27)(r3)
2452 std r28, VCPU_GPR(R28)(r3)
2453 std r29, VCPU_GPR(R29)(r3)
2454 std r30, VCPU_GPR(R30)(r3)
2455 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002456
2457 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002458 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002459
Paul Mackerras93d17392016-06-22 15:52:55 +10002460#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2461BEGIN_FTR_SECTION
2462 ld r9, HSTATE_KVM_VCPU(r13)
2463 bl kvmppc_save_tm
2464END_FTR_SECTION_IFSET(CPU_FTR_TM)
2465#endif
2466
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002467 /*
2468 * Set DEC to the smaller of DEC and HDEC, so that we wake
2469 * no later than the end of our timeslice (HDEC interrupts
2470 * don't wake us from nap).
2471 */
2472 mfspr r3, SPRN_DEC
2473 mfspr r4, SPRN_HDEC
2474 mftb r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10002475BEGIN_FTR_SECTION
2476 /* On P9 check whether the guest has large decrementer mode enabled */
2477 ld r6, HSTATE_KVM_VCORE(r13)
2478 ld r6, VCORE_LPCR(r6)
2479 andis. r6, r6, LPCR_LD@h
2480 bne 68f
2481END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras2f272462017-05-22 16:25:14 +10002482 extsw r3, r3
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000248368: EXTEND_HDEC(r4)
Paul Mackerras2f272462017-05-22 16:25:14 +10002484 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002485 ble 67f
2486 mtspr SPRN_DEC, r4
248767:
2488 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002489 add r3, r3, r5
2490 ld r4, HSTATE_KVM_VCPU(r13)
2491 ld r5, HSTATE_KVM_VCORE(r13)
2492 ld r6, VCORE_TB_OFFSET(r5)
2493 subf r3, r6, r3 /* convert to host TB value */
2494 std r3, VCPU_DEC_EXPIRES(r4)
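	/*
	 * Net effect of the block above: DEC = min(DEC, HDEC), so the
	 * nap ends by the end of the timeslice, while the saved expiry
	 * is the original DEC + tb - vcore->tb_offset, i.e. in host
	 * timebase units as on the exit path.
	 */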
2495
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002496#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2497 ld r4, HSTATE_KVM_VCPU(r13)
2498 addi r3, r4, VCPU_TB_CEDE
2499 bl kvmhv_accumulate_time
2500#endif
2501
Paul Mackerrasccc07772015-03-28 14:21:07 +11002502 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2503
Paul Mackerras19ccb762011-07-23 17:42:46 +10002504 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002505	 * Take a nap until a decrementer, external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002506 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002507 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002508 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002509 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002510kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002511 mfspr r0, SPRN_CTRLF
2512 clrrdi r0, r0, 1
2513 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302514
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002515 li r0,1
2516 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002517 mfspr r5,SPRN_LPCR
2518 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002519BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002520 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002521 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002522END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002523
2524kvm_nap_sequence: /* desired LPCR value in r5 */
2525BEGIN_FTR_SECTION
2526 /*
2527 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2528 * enable state loss = 1 (allow SMT mode switch)
2529 * requested level = 0 (just stop dispatching)
2530 */
2531 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2532 mtspr SPRN_PSSCR, r3
2533 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2534 li r4, LPCR_PECE_HVEE@higher
2535 sldi r4, r4, 32
2536 or r5, r5, r4
2537END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002538 mtspr SPRN_LPCR,r5
2539 isync
2540 li r0, 0
2541 std r0, HSTATE_SCRATCH0(r13)
2542 ptesync
2543 ld r0, HSTATE_SCRATCH0(r13)
25441: cmpd r0, r0
2545 bne 1b
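	/*
	 * The std/ptesync/ld/cmpd sequence above is an ordering idiom:
	 * cmpd r0,r0 always compares equal, so the branch is never
	 * taken; the dependent load exists only to guarantee that the
	 * scratch store has been performed before the thread stops.
	 */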
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002546BEGIN_FTR_SECTION
Paul Mackerras19ccb762011-07-23 17:42:46 +10002547 nap
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002548FTR_SECTION_ELSE
2549 PPC_STOP
2550ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002551 b .
2552
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100255333: mr r4, r3
2554 li r3, 0
2555 li r12, 0
2556 b 34f
2557
Paul Mackerras19ccb762011-07-23 17:42:46 +10002558kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002559 /* get vcpu pointer */
2560 ld r4, HSTATE_KVM_VCPU(r13)
2561
Paul Mackerras19ccb762011-07-23 17:42:46 +10002562 /* Woken by external or decrementer interrupt */
2563 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002564
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002565#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2566 addi r3, r4, VCPU_TB_RMINTR
2567 bl kvmhv_accumulate_time
2568#endif
2569
Paul Mackerras93d17392016-06-22 15:52:55 +10002570#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2571BEGIN_FTR_SECTION
2572 bl kvmppc_restore_tm
2573END_FTR_SECTION_IFSET(CPU_FTR_TM)
2574#endif
2575
Paul Mackerras19ccb762011-07-23 17:42:46 +10002576 /* load up FP state */
2577 bl kvmppc_load_fp
2578
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002579 /* Restore guest decrementer */
2580 ld r3, VCPU_DEC_EXPIRES(r4)
2581 ld r5, HSTATE_KVM_VCORE(r13)
2582 ld r6, VCORE_TB_OFFSET(r5)
2583 add r3, r3, r6 /* convert host TB to guest TB value */
2584 mftb r7
2585 subf r3, r7, r3
2586 mtspr SPRN_DEC, r3
2587
Paul Mackerras19ccb762011-07-23 17:42:46 +10002588 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002589 ld r14, VCPU_GPR(R14)(r4)
2590 ld r15, VCPU_GPR(R15)(r4)
2591 ld r16, VCPU_GPR(R16)(r4)
2592 ld r17, VCPU_GPR(R17)(r4)
2593 ld r18, VCPU_GPR(R18)(r4)
2594 ld r19, VCPU_GPR(R19)(r4)
2595 ld r20, VCPU_GPR(R20)(r4)
2596 ld r21, VCPU_GPR(R21)(r4)
2597 ld r22, VCPU_GPR(R22)(r4)
2598 ld r23, VCPU_GPR(R23)(r4)
2599 ld r24, VCPU_GPR(R24)(r4)
2600 ld r25, VCPU_GPR(R25)(r4)
2601 ld r26, VCPU_GPR(R26)(r4)
2602 ld r27, VCPU_GPR(R27)(r4)
2603 ld r28, VCPU_GPR(R28)(r4)
2604 ld r29, VCPU_GPR(R29)(r4)
2605 ld r30, VCPU_GPR(R30)(r4)
2606 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002607
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002608 /* Check the wake reason in SRR1 to see why we got here */
2609 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002610
Suresh Warrier37f55d32016-08-19 15:35:46 +10002611 /*
2612	 * Restore volatile registers, since we could have called a
2613	 * C routine in kvmppc_check_wake_reason.
2614	 * r4 = VCPU
2615	 * r3 tells us whether we need to return to the host or not.
2616	 * WARNING: r3 is checked again further down;
2617	 * do not modify it until that check is done.
2618 */
2619 ld r4, HSTATE_KVM_VCPU(r13)
2620
Paul Mackerras19ccb762011-07-23 17:42:46 +10002621 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100262234: ld r5,HSTATE_KVM_VCORE(r13)
2623 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002624 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002625 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002626 addi r6,r5,VCORE_NAPPING_THREADS
262732: lwarx r7,0,r6
2628 andc r7,r7,r0
2629 stwcx. r7,0,r6
2630 bne 32b
2631 li r0,0
2632 stb r0,HSTATE_NAPPING(r13)
2633
Suresh Warrier37f55d32016-08-19 15:35:46 +10002634 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002635 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002636 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002637 cmpdi r3, 0
2638 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002639
Paul Mackerras19ccb762011-07-23 17:42:46 +10002640 /* see if any other thread is already exiting */
2641 lwz r0,VCORE_ENTRY_EXIT(r5)
2642 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002643 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002644
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002645 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002646
2647 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002648kvm_cede_prodded:
2649 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002650 stb r0,VCPU_PRODDED(r3)
2651 sync /* order testing prodded vs. clearing ceded */
2652 stb r0,VCPU_CEDED(r3)
2653 li r3,H_SUCCESS
2654 blr
2655
2656 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002657kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002658 ld r9, HSTATE_KVM_VCPU(r13)
2659 b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002660
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002661 /* Try to handle a machine check in real mode */
2662machine_check_realmode:
2663 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002664 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002665 nop
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002666 ld r9, HSTATE_KVM_VCPU(r13)
2667 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302668 /*
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302669	 * For a guest that is FWNMI capable, deliver all MCE errors
2670	 * (handled or unhandled) by exiting the guest with the KVM_EXIT_NMI
2671	 * exit reason. This approach injects machine check errors into the
2672	 * guest address space with additional information in the form of an
2673	 * RTAS event, enabling the guest kernel to handle such errors
2674	 * suitably.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302675	 *
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302676	 * For a guest that is not FWNMI capable (old QEMU), fall back
2677	 * to the old behaviour for backward compatibility:
2678	 * deliver unhandled/fatal (e.g. UE) MCE errors to the guest
2679	 * through a machine check interrupt (set HSRR0 to 0x200).
2680	 * For handled (non-fatal) errors, just go back to guest execution
2681	 * with the current HSRR0.
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302682	 * If we receive a machine check with MSR[RI]=0, deliver it to the
2683	 * guest as a machine check, causing the guest to crash.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302684 */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302685 ld r11, VCPU_MSR(r9)
Paul Mackerras1c9e3d52015-11-12 16:43:48 +11002686 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2687 bne mc_cont /* if so, exit to host */
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302688 /* Check if guest is capable of handling NMI exit */
2689 ld r10, VCPU_KVM(r9)
2690 lbz r10, KVM_FWNMI(r10)
2691 cmpdi r10, 1 /* FWNMI capable? */
2692 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
2693
2694 /* if not, fall through for backward compatibility. */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302695 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2696 beq 1f /* Deliver a machine check to guest */
2697 ld r10, VCPU_PC(r9)
2698 cmpdi r3, 0 /* Did we handle MCE ? */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302699 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002700 /* If not, deliver a machine check. SRR0/1 are already set */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +053027011: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Michael Neulinge4e38122014-03-25 10:47:02 +11002702 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053027032: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002704
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
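	/*
	 * The rlwinm above rotates the low word of SRR1 left by 14
	 * (45-31) bits and masks it, leaving the SRR1 wake-reason
	 * field in the low bits of r6.  The values tested below follow
	 * that encoding: 8 = external interrupt, 6 = decrementer,
	 * 5 = privileged doorbell, 3 = hypervisor doorbell, 0xa = HMI.
	 */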
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
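	/*
	 * msgclr takes the message type from rB (bits 32:36 in IBM
	 * numbering); the lis above places PPC_DBELL_SERVER in that
	 * field so the pending server doorbell is the one cleared.
	 */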
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * A return code of 2 means a PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt.  The trap reason is expected in r12 by the guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
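	/*
	 * FP (and, where supported, VEC/VSX) must be enabled in the
	 * MSR before store_fp_state/store_vr_state can access the
	 * corresponding register files.
	 */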
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers.
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)
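	/*
	 * After treclaim the GPRs (and the FP/VR state) hold the
	 * checkpointed, pre-transaction values, which is what gets
	 * saved into the vcpu struct below; the failure cause passed
	 * in r3 is recorded in TEXASR.
	 */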

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
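	/*
	 * The .rept/.endr above expands at assembly time into one std
	 * per GPR (r0-r28), skipping r9 and r13, which are still live.
	 */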
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5
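	/*
	 * MSR_TM lives in the upper 32 bits, out of reach of a 16-bit
	 * immediate, hence the li/sldi/or sequence above to set it.
	 */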

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this might
	 * have been left unset on a kvmppc_set_one_reg() call, but we
	 * shouldn't let that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
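/*
 * MSR[TS] encoding (per the Power ISA): 0b00 = non-transactional,
 * 0b01 = suspended, 0b10 = transactional.  Interrupt delivery moves
 * transactional (2) to suspended (1); any other TS value is inserted
 * into the new MSR unchanged.
 */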
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2	/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
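	/*
	 * A PMC raises its interrupt when its most-significant bit
	 * becomes 1, i.e. on the carry into 0x80000000, so loading
	 * 0x7fffffff arranges for the very next counted event to make
	 * the interrupt pending.
	 */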
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity.
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
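/*
 * TAS_SEQCOUNT in the accumulation struct is used like a seqlock: it
 * is incremented (to an odd value) before the accumulators are updated
 * and incremented again afterwards, with lwsync ordering the stores,
 * so a consumer that sees an even, unchanged count around its reads
 * has taken a consistent snapshot.
 */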
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif