/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
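/*
 * Note: SFS is the size in bytes of the stack frame allocated by
 * kvmppc_hv_entry, and the STACK_SLOT_* offsets within it are used to
 * save host SPR values across guest execution so they can be restored
 * on the way back out.
 */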

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL
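	/*
	 * Note on the sequence above: r0 is the current MSR with RI cleared
	 * and r6 is the current MSR with IR/DR (translation) cleared.  RI is
	 * dropped first, then SRR0/SRR1 are loaded with the target address
	 * and the real-mode MSR image, so RFI_TO_KERNEL lands in
	 * kvmppc_call_hv_entry with the MMU off.
	 */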

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
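	/*
	 * Note: the host's MMCR0 value (still in r3) is written last, after
	 * the counters and the other PMU control registers, so the PMU only
	 * resumes once the host's full configuration is back in place.
	 */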
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
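	/*
	 * Note: the lwarx/stwcx. loop above sets this thread's bit in
	 * vcore->napping_threads atomically, retrying if another thread
	 * modified the word between the load-reserve and store-conditional.
	 */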
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, just check for an HMI interrupt and
	 * invoke the opal hmi handler.  By now we are sure that the
	 * primary thread on this core/subcore has already done the
	 * partition switch/TB resync and we are good to call the opal
	 * hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  Hence the HMI keeps waking the
	 * secondaries up from nap in a loop, and the secondaries always
	 * go back to nap since no vcore is assigned to them.  This makes
	 * it impossible for the primary thread to get hold of the
	 * secondary threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b
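	/*
	 * Note: this lwarx/stwcx. loop atomically sets our bit in the
	 * vcore entry/exit word.  The exit map is kept above the entry
	 * map, so a value of 0x100 or more means some thread has already
	 * begun exiting and we must not enter the guest.
	 */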

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_restore_tm
91:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
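	/*
	 * Note: the rldicl/rotldi pair rotates the MSR so the HV bit ends
	 * up at the top, clears it, and rotates everything back -- i.e. it
	 * clears MSR_HV without needing a 64-bit mask constant.  MSR_ME is
	 * then forced on so the guest always runs with machine checks
	 * enabled.
	 */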

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
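	/*
	 * Note: copying the pending-external bit into LPCR[MER] arms the
	 * mediated external request mechanism: if the interrupt cannot be
	 * delivered now because the guest has MSR[EE] clear, the hardware
	 * will present it as soon as the guest sets MSR[EE].
	 */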
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.
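/*
 * Note: the "b ." after HRFI_TO_GUEST should never execute; it is a guard
 * so that nothing falls through if, for any reason, the hrfid does not
 * transfer control into the guest.
 */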

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

Michael Neulingc75df6f2012-06-25 13:33:10 +00001297 std r0, VCPU_GPR(R0)(r9)
1298 std r1, VCPU_GPR(R1)(r9)
1299 std r2, VCPU_GPR(R2)(r9)
1300 std r3, VCPU_GPR(R3)(r9)
1301 std r4, VCPU_GPR(R4)(r9)
1302 std r5, VCPU_GPR(R5)(r9)
1303 std r6, VCPU_GPR(R6)(r9)
1304 std r7, VCPU_GPR(R7)(r9)
1305 std r8, VCPU_GPR(R8)(r9)
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001306 ld r0, HSTATE_SCRATCH2(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001307 std r0, VCPU_GPR(R9)(r9)
1308 std r10, VCPU_GPR(R10)(r9)
1309 std r11, VCPU_GPR(R11)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001310 ld r3, HSTATE_SCRATCH0(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001311 std r3, VCPU_GPR(R12)(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001312 /* CR is in the high half of r12 */
1313 srdi r4, r12, 32
Paul Mackerrasde56a942011-06-29 00:21:34 +00001314 stw r4, VCPU_CR(r9)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001315BEGIN_FTR_SECTION
1316 ld r3, HSTATE_CFAR(r13)
1317 std r3, VCPU_CFAR(r9)
1318END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001319BEGIN_FTR_SECTION
1320 ld r4, HSTATE_PPR(r13)
1321 std r4, VCPU_PPR(r9)
1322END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001323
1324 /* Restore R1/R2 so we can handle faults */
1325 ld r1, HSTATE_HOST_R1(r13)
1326 ld r2, PACATOC(r13)
1327
1328 mfspr r10, SPRN_SRR0
1329 mfspr r11, SPRN_SRR1
1330 std r10, VCPU_SRR0(r9)
1331 std r11, VCPU_SRR1(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001332 /* trap is in the low half of r12, clear CR from the high half */
1333 clrldi r12, r12, 32
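	/*
	 * By convention the first-level handlers pass the vector with
	 * bit 1 set (e.g. 0xe02 rather than 0xe00) for interrupts that
	 * were delivered via HSRR0/1, so test that bit to decide whether
	 * to reload PC/MSR from HSRR0/1, then clear it again below.
	 */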
Paul Mackerrasde56a942011-06-29 00:21:34 +00001334 andi. r0, r12, 2 /* need to read HSRR0/1? */
1335 beq 1f
1336 mfspr r10, SPRN_HSRR0
1337 mfspr r11, SPRN_HSRR1
1338 clrrdi r12, r12, 2
13391: std r10, VCPU_PC(r9)
1340 std r11, VCPU_MSR(r9)
1341
1342 GET_SCRATCH0(r3)
1343 mflr r4
Michael Neulingc75df6f2012-06-25 13:33:10 +00001344 std r3, VCPU_GPR(R13)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001345 std r4, VCPU_LR(r9)
1346
Paul Mackerrasde56a942011-06-29 00:21:34 +00001347 stw r12,VCPU_TRAP(r9)
1348
Paul Mackerras8b24e692017-06-26 15:45:51 +10001349 /*
1350 * Now that we have saved away SRR0/1 and HSRR0/1,
1351 * interrupts are recoverable in principle, so set MSR_RI.
1352 * This becomes important for relocation-on interrupts from
1353 * the guest, which we can get in radix mode on POWER9.
1354 */
1355 li r0, MSR_RI
1356 mtmsrd r0, 1
1357
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001358#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1359 addi r3, r9, VCPU_TB_RMINTR
1360 mr r4, r9
1361 bl kvmhv_accumulate_time
1362 ld r5, VCPU_GPR(R5)(r9)
1363 ld r6, VCPU_GPR(R6)(r9)
1364 ld r7, VCPU_GPR(R7)(r9)
1365 ld r8, VCPU_GPR(R8)(r9)
1366#endif
1367
Paul Mackerras4a157d62014-12-03 13:30:39 +11001368 /* Save HEIR (HV emulation assist reg) in emul_inst
Paul Mackerras697d3892011-12-12 12:36:37 +00001369 if this is an HEI (HV emulation interrupt, e40) */
1370 li r3,KVM_INST_FETCH_FAILED
Paul Mackerras2bf27602015-03-20 20:39:40 +11001371 stw r3,VCPU_LAST_INST(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001372 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1373 bne 11f
1374 mfspr r3,SPRN_HEIR
Paul Mackerras4a157d62014-12-03 13:30:39 +1100137511: stw r3,VCPU_HEIR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001376
1377 /* these are volatile across C function calls */
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001378#ifdef CONFIG_RELOCATABLE
1379 ld r3, HSTATE_SCRATCH1(r13)
1380 mtctr r3
1381#else
Paul Mackerras697d3892011-12-12 12:36:37 +00001382 mfctr r3
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001383#endif
Paul Mackerras697d3892011-12-12 12:36:37 +00001384 mfxer r4
1385 std r3, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001386 std r4, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001387
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001388#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1389 /* For softpatch interrupt, go off and do TM instruction emulation */
1390 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1391 beq kvmppc_tm_emul
1392#endif
1393
Paul Mackerras697d3892011-12-12 12:36:37 +00001394 /* If this is a page table miss then see if it's theirs or ours */
1395 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1396 beq kvmppc_hdsi
Paul Mackerras342d3db2011-12-12 12:38:05 +00001397 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1398 beq kvmppc_hisi
Paul Mackerras697d3892011-12-12 12:36:37 +00001399
Paul Mackerrasde56a942011-06-29 00:21:34 +00001400 /* See if this is a leftover HDEC interrupt */
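	/*
	 * If HDEC is still non-negative, the interrupt was presumably
	 * latched before HDEC was reprogrammed for this guest entry,
	 * i.e. the timeslice has not actually expired, so go straight
	 * back into the guest.
	 */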
1401 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1402 bne 2f
1403 mfspr r3,SPRN_HDEC
Paul Mackerrasa4faf2e2017-08-25 19:52:12 +10001404 EXTEND_HDEC(r3)
1405 cmpdi r3,0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001406 mr r4,r9
1407 bge fast_guest_return
Paul Mackerrasde56a942011-06-29 00:21:34 +000014082:
Paul Mackerras697d3892011-12-12 12:36:37 +00001409 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001410 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1411 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001412
Paul Mackerras66feed62015-03-28 14:21:12 +11001413 /* Hypervisor doorbell - exit only if host IPI flag set */
1414 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1415 bne 3f
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001416BEGIN_FTR_SECTION
1417 PPC_MSGSYNC
Nicholas Piggin2cde3712017-10-10 20:18:28 +10001418 lwsync
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001419END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11001420 lbz r0, HSTATE_HOST_IPI(r13)
Gautham R. Shenoy06554d92015-08-07 17:41:20 +05301421 cmpwi r0, 0
Paul Mackerras66feed62015-03-28 14:21:12 +11001422 beq 4f
1423 b guest_exit_cont
14243:
Paul Mackerras769377f2017-02-15 14:30:17 +11001425 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1426 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1427 bne 14f
1428 mfspr r3, SPRN_HFSCR
1429 std r3, VCPU_HFSCR(r9)
1430 b guest_exit_cont
143114:
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001432 /* External interrupt ? */
1433 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001434 bne+ guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001435
1436 /* External interrupt, first check for host_ipi. If this is
1437 * set, we know the host wants us out so let's do it now
1438 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001439 bl kvmppc_read_intr
Suresh Warrier37f55d32016-08-19 15:35:46 +10001440
1441 /*
1442 * Restore the active volatile registers after returning from
1443 * a C function.
1444 */
1445 ld r9, HSTATE_KVM_VCPU(r13)
1446 li r12, BOOK3S_INTERRUPT_EXTERNAL
1447
1448 /*
1449 * kvmppc_read_intr return codes:
1450 *
1451 * Exit to host (r3 > 0)
1452 * 1 An interrupt is pending that needs to be handled by the host
1453 * Exit guest and return to host by branching to guest_exit_cont
1454 *
Suresh Warrierf7af5202016-08-19 15:35:52 +10001455 * 2 Passthrough that needs completion in the host
1456 * Exit guest and return to host by branching to guest_exit_cont
1457 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1458 * to indicate to the host to complete handling the interrupt
1459 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001460 * Before returning to guest, we check if any CPU is heading out
1461 * to the host and if so, we head out also. If no CPUs are heading
 1462 * out, check the return values <= 0 below and return to the guest.
1463 *
1464 * Return to guest (r3 <= 0)
1465 * 0 No external interrupt is pending
1466 * -1 A guest wakeup IPI (which has now been cleared)
1467 * In either case, we return to guest to deliver any pending
1468 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001469 *
1470 * -2 A PCI passthrough external interrupt was handled
1471 * (interrupt was delivered directly to guest)
1472 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001473 */
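	/*
	 * A rough C sketch of the dispatch below (illustrative only,
	 * not the exact kernel source):
	 *
	 *	rc = kvmppc_read_intr();
	 *	if (rc > 1)       { trap = BOOK3S_INTERRUPT_HV_RM_HARD; exit; }
	 *	else if (rc == 1) exit to the host;
	 *	else if (no thread in this vcore has begun exiting)
	 *		re-enter the guest via deliver_guest_interrupt;
	 *	else    exit to the host;
	 */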
1474
Suresh Warrierf7af5202016-08-19 15:35:52 +10001475 cmpdi r3, 1
1476 ble 1f
1477
1478 /* Return code = 2 */
1479 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1480 stw r12, VCPU_TRAP(r9)
1481 b guest_exit_cont
1482
14831: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001484 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001485 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001486
Suresh Warrier37f55d32016-08-19 15:35:46 +10001487 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110014884: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001489 lwz r0, VCORE_ENTRY_EXIT(r5)
1490 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001491 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001492 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001493
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001494guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerras43ff3f62018-01-11 14:31:43 +11001495 /* Save more register state */
1496 mfdar r6
1497 mfdsisr r7
1498 std r6, VCPU_DAR(r9)
1499 stw r7, VCPU_DSISR(r9)
1500 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1501 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1502 beq mc_cont
1503 std r6, VCPU_FAULT_DAR(r9)
1504 stw r7, VCPU_FAULT_DSISR(r9)
1505
1506 /* See if it is a machine check */
1507 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1508 beq machine_check_realmode
1509mc_cont:
1510#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1511 addi r3, r9, VCPU_TB_RMEXIT
1512 mr r4, r9
1513 bl kvmhv_accumulate_time
1514#endif
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001515#ifdef CONFIG_KVM_XICS
1516 /* We are exiting, pull the VP from the XIVE */
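	/*
	 * If this vcpu's VP context is pushed onto the thread's XIVE
	 * interrupt management area (TIMA), pull it back: a load from the
	 * TM_SPC_PULL_OS_CTX special offset pulls the OS context, and a
	 * second load from TM_QW1_OS reads back words 0-1 of that context
	 * so they can be saved in the vcpu.  The virtual or physical TIMA
	 * mapping is used depending on whether MSR[DR] is currently on.
	 */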
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001517 lbz r0, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001518 cmpwi cr0, r0, 0
1519 beq 1f
1520 li r7, TM_SPC_PULL_OS_CTX
1521 li r6, TM_QW1_OS
1522 mfmsr r0
Benjamin Herrenschmidt2662efd2018-01-12 13:37:14 +11001523 andi. r0, r0, MSR_DR /* in real mode? */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001524 beq 2f
1525 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1526 cmpldi cr0, r10, 0
1527 beq 1f
1528 /* First load to pull the context, we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001529 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001530 lwzx r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001531 /* Second load to recover the context state (Words 0 and 1) */
1532 ldx r11, r6, r10
1533 b 3f
15342: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1535 cmpldi cr0, r10, 0
1536 beq 1f
1537 /* First load to pull the context, we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001538 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001539 lwzcix r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001540 /* Second load to recover the context state (Words 0 and 1) */
1541 ldcix r11, r6, r10
15423: std r11, VCPU_XIVE_SAVED_STATE(r9)
1543 /* Fixup some of the state for the next load */
1544 li r10, 0
1545 li r0, 0xff
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001546 stb r10, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001547 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1548 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001549 eieio
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +100015501:
1551#endif /* CONFIG_KVM_XICS */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001552
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001553 /* For hash guest, read the guest SLB and save it away */
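	/*
	 * Each valid SLB entry (SLB_ESID_V set) is read with slbmfee /
	 * slbmfev; the slot index is added into the saved ESID word so
	 * the entry can be put back in the same slot with slbmte on the
	 * next guest entry.  The SLB is then cleared so that no guest
	 * translations remain while we run in the host.
	 */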
1554 ld r5, VCPU_KVM(r9)
1555 lbz r0, KVM_RADIX(r5)
1556 li r5, 0
1557 cmpwi r0, 0
1558 bne 3f /* for radix, save 0 entries */
1559 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1560 mtctr r0
1561 li r6,0
1562 addi r7,r9,VCPU_SLB
15631: slbmfee r8,r6
1564 andis. r0,r8,SLB_ESID_V@h
1565 beq 2f
1566 add r8,r8,r6 /* put index in */
1567 slbmfev r3,r6
1568 std r8,VCPU_SLB_E(r7)
1569 std r3,VCPU_SLB_V(r7)
1570 addi r7,r7,VCPU_SLB_SIZE
1571 addi r5,r5,1
15722: addi r6,r6,1
1573 bdnz 1b
1574 /* Finally clear out the SLB */
1575 li r0,0
1576 slbmte r0,r0
1577 slbia
1578 ptesync
15793: stw r5,VCPU_SLB_MAX(r9)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001580
Paul Mackerrascda4a142018-03-22 09:48:54 +11001581 /* load host SLB entries */
1582BEGIN_MMU_FTR_SECTION
1583 b 0f
1584END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1585 ld r8,PACA_SLBSHADOWPTR(r13)
1586
1587 .rept SLB_NUM_BOLTED
1588 li r3, SLBSHADOW_SAVEAREA
1589 LDX_BE r5, r8, r3
1590 addi r3, r3, 8
1591 LDX_BE r6, r8, r3
1592 andis. r7,r5,SLB_ESID_V@h
1593 beq 1f
1594 slbmte r6,r5
15951: addi r8,r8,16
1596 .endr
15970:
1598
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001599guest_bypass:
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001600 stw r12, STACK_SLOT_TRAP(r1)
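	/*
	 * Save the trap number on the stack because r12 gets clobbered by
	 * the C calls below (e.g. kvmhv_commence_exit); it is reloaded
	 * from STACK_SLOT_TRAP for the HMI check and again just before
	 * returning to the caller.
	 */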
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001601
1602 /* Save DEC */
1603 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1604 ld r3, HSTATE_KVM_VCORE(r13)
1605 mfspr r5,SPRN_DEC
1606 mftb r6
1607 /* On P9, if the guest has large decr enabled, don't sign extend */
1608BEGIN_FTR_SECTION
1609 ld r4, VCORE_LPCR(r3)
1610 andis. r4, r4, LPCR_LD@h
1611 bne 16f
1612END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1613 extsw r5,r5
161416: add r5,r5,r6
1615 /* r5 is a guest timebase value here, convert to host TB */
1616 ld r4,VCORE_TB_OFFSET_APPL(r3)
1617 subf r5,r4,r5
1618 std r5,VCPU_DEC_EXPIRES(r9)
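	/*
	 * That is, dec_expires = (current guest timebase + DEC), converted
	 * back to host timebase units by subtracting the offset that was
	 * applied on guest entry (VCORE_TB_OFFSET_APPL).
	 */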
1619
Paul Mackerras6af27c82015-03-28 14:21:10 +11001620 /* Increment exit count, poke other threads to exit */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001621 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001622 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001623 nop
1624 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001625
Paul Mackerrasec257162015-06-24 21:18:03 +10001626 /* Stop others sending VCPU interrupts to this physical CPU */
1627 li r0, -1
1628 stw r0, VCPU_CPU(r9)
1629 stw r0, VCPU_THREAD_CPU(r9)
1630
Paul Mackerrasde56a942011-06-29 00:21:34 +00001631 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001632 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001633 stw r6,VCPU_CTRL(r9)
1634 andi. r0,r6,1
1635 bne 4f
1636 ori r6,r6,1
1637 mtspr SPRN_CTRLT,r6
16384:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001639 /*
1640 * Save the guest PURR/SPURR
1641 */
1642 mfspr r5,SPRN_PURR
1643 mfspr r6,SPRN_SPURR
1644 ld r7,VCPU_PURR(r9)
1645 ld r8,VCPU_SPURR(r9)
1646 std r5,VCPU_PURR(r9)
1647 std r6,VCPU_SPURR(r9)
1648 subf r5,r7,r5
1649 subf r6,r8,r6
1650
1651 /*
1652 * Restore host PURR/SPURR and add guest times
1653 * so that the time in the guest gets accounted.
1654 */
1655 ld r3,HSTATE_PURR(r13)
1656 ld r4,HSTATE_SPURR(r13)
1657 add r3,r3,r5
1658 add r4,r4,r6
1659 mtspr SPRN_PURR,r3
1660 mtspr SPRN_SPURR,r4
1661
Michael Neulingb005255e2014-01-08 21:25:21 +11001662BEGIN_FTR_SECTION
1663 b 8f
1664END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001665 /* Save POWER8-specific registers */
1666 mfspr r5, SPRN_IAMR
1667 mfspr r6, SPRN_PSPB
1668 mfspr r7, SPRN_FSCR
1669 std r5, VCPU_IAMR(r9)
1670 stw r6, VCPU_PSPB(r9)
1671 std r7, VCPU_FSCR(r9)
1672 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001673 mfspr r7, SPRN_TAR
1674 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001675 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001676 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001677 std r8, VCPU_EBBHR(r9)
1678 mfspr r5, SPRN_EBBRR
1679 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001680 mfspr r7, SPRN_PID
1681 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001682 std r5, VCPU_EBBRR(r9)
1683 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001684 stw r7, VCPU_GUEST_PID(r9)
1685 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001686BEGIN_FTR_SECTION
1687 mfspr r5, SPRN_TCSCR
1688 mfspr r6, SPRN_ACOP
1689 mfspr r7, SPRN_CSIGR
1690 mfspr r8, SPRN_TACR
1691 std r5, VCPU_TCSCR(r9)
1692 std r6, VCPU_ACOP(r9)
1693 std r7, VCPU_CSIGR(r9)
1694 std r8, VCPU_TACR(r9)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001695FTR_SECTION_ELSE
1696 mfspr r5, SPRN_TIDR
1697 mfspr r6, SPRN_PSSCR
1698 std r5, VCPU_TID(r9)
1699 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1700 rotldi r6, r6, 60
1701 std r6, VCPU_PSSCR(r9)
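	/*
	 * The rldicl/rotldi pair above effectively does
	 * r6 &= 0xf0000000000003ff (the guest-visible PSSCR fields),
	 * using rotates so that no mask register is needed.
	 */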
Paul Mackerras769377f2017-02-15 14:30:17 +11001702 /* Restore host HFSCR value */
1703 ld r7, STACK_SLOT_HFSCR(r1)
1704 mtspr SPRN_HFSCR, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001705ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001706 /*
1707 * Restore various registers to 0, where non-zero values
1708 * set by the guest could disrupt the host.
1709 */
1710 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001711 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001712 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001713BEGIN_FTR_SECTION
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001714 mtspr SPRN_IAMR, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001715 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001716 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1717 li r0, 1
1718 sldi r0, r0, 31
1719 mtspr SPRN_MMCRS, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001720END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Michael Neulingb005255e2014-01-08 21:25:21 +110017218:
1722
Paul Mackerrasde56a942011-06-29 00:21:34 +00001723 /* Save and reset AMR and UAMOR before turning on the MMU */
1724 mfspr r5,SPRN_AMR
1725 mfspr r6,SPRN_UAMOR
1726 std r5,VCPU_AMR(r9)
1727 std r6,VCPU_UAMOR(r9)
1728 li r6,0
1729 mtspr SPRN_AMR,r6
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001730 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001731
Paul Mackerrasde56a942011-06-29 00:21:34 +00001732 /* Switch DSCR back to host value */
1733 mfspr r8, SPRN_DSCR
1734 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001735 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001736 mtspr SPRN_DSCR, r7
1737
1738 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001739 std r14, VCPU_GPR(R14)(r9)
1740 std r15, VCPU_GPR(R15)(r9)
1741 std r16, VCPU_GPR(R16)(r9)
1742 std r17, VCPU_GPR(R17)(r9)
1743 std r18, VCPU_GPR(R18)(r9)
1744 std r19, VCPU_GPR(R19)(r9)
1745 std r20, VCPU_GPR(R20)(r9)
1746 std r21, VCPU_GPR(R21)(r9)
1747 std r22, VCPU_GPR(R22)(r9)
1748 std r23, VCPU_GPR(R23)(r9)
1749 std r24, VCPU_GPR(R24)(r9)
1750 std r25, VCPU_GPR(R25)(r9)
1751 std r26, VCPU_GPR(R26)(r9)
1752 std r27, VCPU_GPR(R27)(r9)
1753 std r28, VCPU_GPR(R28)(r9)
1754 std r29, VCPU_GPR(R29)(r9)
1755 std r30, VCPU_GPR(R30)(r9)
1756 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001757
1758 /* Save SPRGs */
1759 mfspr r3, SPRN_SPRG0
1760 mfspr r4, SPRN_SPRG1
1761 mfspr r5, SPRN_SPRG2
1762 mfspr r6, SPRN_SPRG3
1763 std r3, VCPU_SPRG0(r9)
1764 std r4, VCPU_SPRG1(r9)
1765 std r5, VCPU_SPRG2(r9)
1766 std r6, VCPU_SPRG3(r9)
1767
Paul Mackerras89436332012-03-02 01:38:23 +00001768 /* save FP state */
1769 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001770 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001771
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001772#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001773/*
1774 * Branch around the call if both CPU_FTR_TM and
1775 * CPU_FTR_P9_TM_HV_ASSIST are off.
1776 */
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001777BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001778 b 91f
1779END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001780 /*
1781 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1782 */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10001783 bl kvmppc_save_tm
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100178491:
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001785#endif
1786
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001787 /* Increment yield count if they have a VPA */
1788 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1789 cmpdi r8, 0
1790 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001791 li r4, LPPACA_YIELDCOUNT
1792 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001793 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001794 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001795 li r3, 1
1796 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000179725:
1798 /* Save PMU registers if requested */
1799 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001800BEGIN_FTR_SECTION
1801 /*
1802 * POWER8 seems to have a hardware bug where setting
1803 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1804 * when some counters are already negative doesn't seem
1805 * to cause a performance monitor alert (and hence interrupt).
1806 * The effect of this is that when saving the PMU state,
1807 * if there is no PMU alert pending when we read MMCR0
1808 * before freezing the counters, but one becomes pending
1809 * before we read the counters, we lose it.
1810 * To work around this, we need a way to freeze the counters
1811 * before reading MMCR0. Normally, freezing the counters
1812 * is done by writing MMCR0 (to set MMCR0[FC]) which
 1813 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1814 * we can also freeze the counters using MMCR2, by writing
1815 * 1s to all the counter freeze condition bits (there are
1816 * 9 bits each for 6 counters).
1817 */
1818 li r3, -1 /* set all freeze bits */
1819 clrrdi r3, r3, 10
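	/*
	 * r3 is now 0xfffffffffffffc00: ones in the upper 54 bits, i.e.
	 * the 9 freeze condition bits for each of the 6 counters referred
	 * to above, and zeroes in the low 10 bits of MMCR2.
	 */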
1820 mfspr r10, SPRN_MMCR2
1821 mtspr SPRN_MMCR2, r3
1822 isync
1823END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001824 li r3, 1
1825 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1826 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1827 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001828 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001829 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001830 li r7, 0
1831 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001832 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001833 beq 21f /* if no VPA, save PMU stuff anyway */
1834 lbz r7, LPPACA_PMCINUSE(r8)
1835 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1836 bne 21f
1837 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1838 b 22f
183921: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001840 mfspr r7, SPRN_SIAR
1841 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001842 std r4, VCPU_MMCR(r9)
1843 std r5, VCPU_MMCR + 8(r9)
1844 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001845BEGIN_FTR_SECTION
1846 std r10, VCPU_MMCR + 24(r9)
1847END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001848 std r7, VCPU_SIAR(r9)
1849 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001850 mfspr r3, SPRN_PMC1
1851 mfspr r4, SPRN_PMC2
1852 mfspr r5, SPRN_PMC3
1853 mfspr r6, SPRN_PMC4
1854 mfspr r7, SPRN_PMC5
1855 mfspr r8, SPRN_PMC6
1856 stw r3, VCPU_PMC(r9)
1857 stw r4, VCPU_PMC + 4(r9)
1858 stw r5, VCPU_PMC + 8(r9)
1859 stw r6, VCPU_PMC + 12(r9)
1860 stw r7, VCPU_PMC + 16(r9)
1861 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001862BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001863 mfspr r5, SPRN_SIER
Paul Mackerras83677f52016-11-16 22:33:27 +11001864 std r5, VCPU_SIER(r9)
1865BEGIN_FTR_SECTION_NESTED(96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001866 mfspr r6, SPRN_SPMC1
1867 mfspr r7, SPRN_SPMC2
1868 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001869 stw r6, VCPU_PMC + 24(r9)
1870 stw r7, VCPU_PMC + 28(r9)
1871 std r8, VCPU_MMCR + 32(r9)
1872 lis r4, 0x8000
1873 mtspr SPRN_MMCRS, r4
Paul Mackerras83677f52016-11-16 22:33:27 +11001874END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001875END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000187622:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001877
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001878 /* Restore host values of some registers */
1879BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001880 ld r5, STACK_SLOT_CIABR(r1)
1881 ld r6, STACK_SLOT_DAWR(r1)
1882 ld r7, STACK_SLOT_DAWRX(r1)
1883 mtspr SPRN_CIABR, r5
Michael Neulingb53221e2018-03-27 15:37:22 +11001884 /*
1885 * If the DAWR doesn't work, it's ok to write these here as
1886 * this value should always be zero
1887 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001888 mtspr SPRN_DAWR, r6
1889 mtspr SPRN_DAWRX, r7
1890END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1891BEGIN_FTR_SECTION
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001892 ld r5, STACK_SLOT_TID(r1)
1893 ld r6, STACK_SLOT_PSSCR(r1)
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001894 ld r7, STACK_SLOT_PID(r1)
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001895 ld r8, STACK_SLOT_IAMR(r1)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001896 mtspr SPRN_TIDR, r5
1897 mtspr SPRN_PSSCR, r6
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001898 mtspr SPRN_PID, r7
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001899 mtspr SPRN_IAMR, r8
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001900END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001901
1902#ifdef CONFIG_PPC_RADIX_MMU
1903 /*
1904 * Are we running hash or radix ?
1905 */
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001906 ld r5, VCPU_KVM(r9)
1907 lbz r0, KVM_RADIX(r5)
1908 cmpwi cr2, r0, 0
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001909 beq cr2, 4f
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001910
Paul Mackerrasdf158182018-05-17 14:47:59 +10001911 /*
1912 * Radix: do eieio; tlbsync; ptesync sequence in case we
1913 * interrupted the guest between a tlbie and a ptesync.
1914 */
1915 eieio
1916 tlbsync
1917 ptesync
1918
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001919 /* Radix: Handle the case where the guest used an illegal PID */
1920 LOAD_REG_ADDR(r4, mmu_base_pid)
1921 lwz r3, VCPU_GUEST_PID(r9)
1922 lwz r5, 0(r4)
1923 cmpw cr0,r3,r5
1924 blt 2f
1925
1926 /*
1927 * Illegal PID, the HW might have prefetched and cached in the TLB
1928 * some translations for the LPID 0 / guest PID combination which
1929 * Linux doesn't know about, so we need to flush that PID out of
1930 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1931 * the right context.
1932 */
1933 li r0,0
1934 mtspr SPRN_LPID,r0
1935 isync
1936
1937 /* Then do a congruence class local flush */
1938 ld r6,VCPU_KVM(r9)
1939 lwz r0,KVM_TLB_SETS(r6)
1940 mtctr r0
1941 li r7,0x400 /* IS field = 0b01 */
1942 ptesync
1943 sldi r0,r3,32 /* RS has PID */
19441: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1945 addi r7,r7,0x1000
1946 bdnz 1b
1947 ptesync
1948
19492: /* Flush the ERAT on radix P9 DD1 guest exit */
Paul Mackerrasf11f6f72017-01-30 21:21:52 +11001950BEGIN_FTR_SECTION
1951 PPC_INVALIDATE_ERAT
1952END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
Paul Mackerras6964e6a2018-01-11 14:51:02 +110019534:
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001954#endif /* CONFIG_PPC_RADIX_MMU */
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001955
Paul Mackerrasde56a942011-06-29 00:21:34 +00001956 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001957 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001958 * We don't have to lock against tlbies but we do
1959 * have to coordinate the hardware threads.
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001960 * Here STACK_SLOT_TRAP(r1) contains the trap number.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001961 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001962kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001963 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001964 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001965 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1966 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001967 cmpwi r3,0
1968 beq 15f
1969 HMT_LOW
197013: lbz r3,VCORE_IN_GUEST(r5)
1971 cmpwi r3,0
1972 bne 13b
1973 HMT_MEDIUM
1974 b 16f
1975
1976 /* Primary thread waits for all the secondaries to exit guest */
197715: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001978 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001979 clrldi r3,r3,56
1980 cmpw r3,r0
1981 bne 15b
1982 isync
1983
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001984 /* Did we actually switch to the guest at all? */
1985 lbz r6, VCORE_IN_GUEST(r5)
1986 cmpwi r6, 0
1987 beq 19f
1988
Paul Mackerrasde56a942011-06-29 00:21:34 +00001989 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001990 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11001991BEGIN_FTR_SECTION
1992 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001993 li r8,LPID_RSVD /* switch to reserved LPID */
1994 mtspr SPRN_LPID,r8
1995 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11001996 mtspr SPRN_SDR1,r6 /* switch to host page table */
1997END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001998 mtspr SPRN_LPID,r7
1999 isync
2000
Michael Neulingb005255e2014-01-08 21:25:21 +11002001BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002002 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11002003 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002004 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11002005 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002006 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11002007 /* clear DPDES so we don't get guest doorbells in the host */
2008 li r8, 0
2009 mtspr SPRN_DPDES, r8
2010END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2011
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302012 /* If HMI, call kvmppc_realmode_hmi_handler() */
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11002013 lwz r12, STACK_SLOT_TRAP(r1)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302014 cmpwi r12, BOOK3S_INTERRUPT_HMI
2015 bne 27f
2016 bl kvmppc_realmode_hmi_handler
2017 nop
Paul Mackerrasd0757452018-01-17 20:51:13 +11002018 cmpdi r3, 0
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302019 /*
Paul Mackerrasd0757452018-01-17 20:51:13 +11002020 * At this point kvmppc_realmode_hmi_handler may have resync-ed
2021 * the TB, and if it has, we must not subtract the guest timebase
2022 * offset from the timebase. So, skip it.
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302023 *
2024 * Also, do not call kvmppc_subcore_exit_guest() because it has
2025 * been invoked as part of kvmppc_realmode_hmi_handler().
2026 */
Paul Mackerrasd0757452018-01-17 20:51:13 +11002027 beq 30f
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302028
202927:
Paul Mackerrasde56a942011-06-29 00:21:34 +00002030 /* Subtract timebase offset from timebase */
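	/*
	 * mtspr TBU40 only sets the upper 40 bits of the timebase, so if
	 * the low 24 bits wrapped between computing the new value and
	 * writing TBU40, add 2^24 (the addis with 0x100 below) and write
	 * TBU40 again to supply the missed carry.
	 */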
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002031 ld r8, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002032 cmpdi r8,0
2033 beq 17f
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002034 li r0, 0
2035 std r0, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11002036 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002037 subf r8,r8,r6
2038 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
2039 mftb r7 /* check if lower 24 bits overflowed */
2040 clrldi r6,r6,40
2041 clrldi r7,r7,40
2042 cmpld r7,r6
2043 bge 17f
2044 addis r8,r8,0x100 /* if so, increment upper 40 bits */
2045 mtspr SPRN_TBU40,r8
2046
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +0530204717: bl kvmppc_subcore_exit_guest
2048 nop
204930: ld r5,HSTATE_KVM_VCORE(r13)
2050 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
2051
Paul Mackerrasde56a942011-06-29 00:21:34 +00002052 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302053 ld r0, VCORE_PCR(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002054 cmpdi r0, 0
2055 beq 18f
2056 li r0, 0
2057 mtspr SPRN_PCR, r0
205818:
2059 /* Signal secondary CPUs to continue */
2060 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000206119: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002062 mtspr SPRN_HDEC,r8
2063
Paul Mackerrasc0101502017-10-19 14:11:23 +1100206416:
2065BEGIN_FTR_SECTION
2066 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
2067 ld r3, HSTATE_SPLIT_MODE(r13)
2068 cmpdi r3, 0
2069 beq 47f
2070 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
2071 cmpwi r8, 0
2072 beq 47f
Paul Mackerrasc0101502017-10-19 14:11:23 +11002073 bl kvmhv_p9_restore_lpcr
2074 nop
Paul Mackerrasc0101502017-10-19 14:11:23 +11002075 b 48f
207647:
2077END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2078 ld r8,KVM_HOST_LPCR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002079 mtspr SPRN_LPCR,r8
2080 isync
Paul Mackerrasc0101502017-10-19 14:11:23 +1100208148:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002082#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2083 /* Finish timing, if we have a vcpu */
2084 ld r4, HSTATE_KVM_VCPU(r13)
2085 cmpdi r4, 0
2086 li r3, 0
2087 beq 2f
2088 bl kvmhv_accumulate_time
20892:
2090#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00002091 /* Unset guest mode */
2092 li r0, KVM_GUEST_MODE_NONE
2093 stb r0, HSTATE_IN_GUEST(r13)
2094
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11002095 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10002096 ld r0, SFS+PPC_LR_STKOFF(r1)
2097 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10002098 mtlr r0
2099 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002100
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002101#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2102/*
2103 * Softpatch interrupt for transactional memory emulation cases
2104 * on POWER9 DD2.2. This is early in the guest exit path - we
2105 * haven't saved registers or done a treclaim yet.
2106 */
2107kvmppc_tm_emul:
2108 /* Save instruction image in HEIR */
2109 mfspr r3, SPRN_HEIR
2110 stw r3, VCPU_HEIR(r9)
2111
2112 /*
2113 * The cases we want to handle here are those where the guest
2114 * is in real suspend mode and is trying to transition to
2115 * transactional mode.
2116 */
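	/*
	 * That is: the vcpu must not be in fake-suspend state, and MSR[TS]
	 * (the 2-bit field extracted by the rldicl below) must be 0b01,
	 * i.e. suspended; anything else keeps exiting to the host.
	 */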
2117 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2118 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2119 bne guest_exit_cont
2120 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2121 cmpwi r3, 1 /* or if not in suspend state */
2122 bne guest_exit_cont
2123
2124 /* Call C code to do the emulation */
2125 mr r3, r9
2126 bl kvmhv_p9_tm_emulation_early
2127 nop
2128 ld r9, HSTATE_KVM_VCPU(r13)
2129 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2130 cmpwi r3, 0
2131 beq guest_exit_cont /* continue exiting if not handled */
2132 ld r10, VCPU_PC(r9)
2133 ld r11, VCPU_MSR(r9)
2134 b fast_interrupt_c_return /* go back to guest if handled */
2135#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2136
Paul Mackerras697d3892011-12-12 12:36:37 +00002137/*
2138 * Check whether an HDSI is an HPTE not found fault or something else.
2139 * If it is an HPTE not found fault that is due to the guest accessing
2140 * a page that they have mapped but which we have paged out, then
2141 * we continue on with the guest exit path. In all other cases,
2142 * reflect the HDSI to the guest as a DSI.
2143 */
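/*
 * In outline (an illustrative sketch, not exact C):
 *	if (radix guest)
 *		save DAR/DSISR/ASDR and exit to the host;
 *	if (neither an HPTE-not-found nor a protection fault)
 *		synthesize a DSI (or DSegI) for the guest;
 *	r3 = kvmppc_hpte_hv_fault(...);
 *	r3 == 0  -> retry the instruction
 *	r3 == -1 -> exit to the host
 *	r3 == -2 -> fetch the instruction word for MMIO emulation, then exit
 *	otherwise r3 is the DSISR value to reflect to the guest as a DSI
 */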
2144kvmppc_hdsi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002145 ld r3, VCPU_KVM(r9)
2146 lbz r0, KVM_RADIX(r3)
Paul Mackerras697d3892011-12-12 12:36:37 +00002147 mfspr r4, SPRN_HDAR
2148 mfspr r6, SPRN_HDSISR
Michael Neulinge001fa72017-09-15 15:26:14 +10002149BEGIN_FTR_SECTION
2150 /* Look for DSISR canary. If we find it, retry instruction */
2151 cmpdi r6, 0x7fff
2152 beq 6f
2153END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2154 cmpwi r0, 0
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002155 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
Paul Mackerras4cf302b2011-12-12 12:38:51 +00002156 /* HPTE not found fault or protection fault? */
2157 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00002158 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002159 andi. r0, r11, MSR_DR /* data relocation enabled? */
2160 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002161BEGIN_FTR_SECTION
2162 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2163 b 4f
2164END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras697d3892011-12-12 12:36:37 +00002165 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002166 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002167 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2168 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000021694: std r4, VCPU_FAULT_DAR(r9)
2170 stw r6, VCPU_FAULT_DSISR(r9)
2171
2172 /* Search the hash table. */
2173 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002174 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002175 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00002176 ld r9, HSTATE_KVM_VCPU(r13)
2177 ld r10, VCPU_PC(r9)
2178 ld r11, VCPU_MSR(r9)
2179 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2180 cmpdi r3, 0 /* retry the instruction */
2181 beq 6f
2182 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002183 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00002184 cmpdi r3, -2 /* MMIO emulation; need instr word */
2185 beq 2f
2186
Paul Mackerrascf29b212015-10-27 16:10:20 +11002187 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00002188 ld r4, VCPU_FAULT_DAR(r9)
2189 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110021901: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00002191 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110021927: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00002193 mtspr SPRN_SRR0, r10
2194 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002195 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002196 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002197fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000021986: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10002199 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00002200 mtctr r7
2201 mtxer r8
2202 mr r4, r9
2203 b fast_guest_return
2204
22053: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2206 ld r5, KVM_VRMA_SLB_V(r5)
2207 b 4b
2208
2209 /* If this is for emulated MMIO, load the instruction word */
22102: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2211
2212 /* Set guest mode to 'jump over instruction' so if lwz faults
2213 * we'll just continue at the next IP. */
2214 li r0, KVM_GUEST_MODE_SKIP
2215 stb r0, HSTATE_IN_GUEST(r13)
2216
2217 /* Do the access with MSR:DR enabled */
2218 mfmsr r3
2219 ori r4, r3, MSR_DR /* Enable paging for data */
2220 mtmsrd r4
2221 lwz r8, 0(r10)
2222 mtmsrd r3
2223
2224 /* Store the result */
2225 stw r8, VCPU_LAST_INST(r9)
2226
2227 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002228 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00002229 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002230 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00002231
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002232.Lradix_hdsi:
2233 std r4, VCPU_FAULT_DAR(r9)
2234 stw r6, VCPU_FAULT_DSISR(r9)
2235.Lradix_hisi:
2236 mfspr r5, SPRN_ASDR
2237 std r5, VCPU_FAULT_GPA(r9)
2238 b guest_exit_cont
2239
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002240/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002241 * Similarly for an HISI, reflect it to the guest as an ISI unless
2242 * it is an HPTE not found fault for a page that we have paged out.
2243 */
2244kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002245 ld r3, VCPU_KVM(r9)
2246 lbz r0, KVM_RADIX(r3)
2247 cmpwi r0, 0
2248 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002249 andis. r0, r11, SRR1_ISI_NOPT@h
2250 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002251 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2252 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002253BEGIN_FTR_SECTION
2254 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2255 b 4f
2256END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002257 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002258 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002259 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2260 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000022614:
2262 /* Search the hash table. */
2263 mr r3, r9 /* vcpu pointer */
2264 mr r4, r10
2265 mr r6, r11
2266 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002267 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002268 ld r9, HSTATE_KVM_VCPU(r13)
2269 ld r10, VCPU_PC(r9)
2270 ld r11, VCPU_MSR(r9)
2271 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2272 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002273 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002274 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002275 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002276
Paul Mackerrascf29b212015-10-27 16:10:20 +11002277 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002278 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110022791: li r0, BOOK3S_INTERRUPT_INST_STORAGE
22807: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002281 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002282 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002283 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002284 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002285
22863: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2287 ld r5, KVM_VRMA_SLB_V(r6)
2288 b 4b
2289
2290/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002291 * Try to handle an hcall in real mode.
2292 * Returns to the guest if we handle it, or continues on up to
2293 * the kernel if we can't (i.e. if we don't have a handler for
2294 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002295 *
2296 * r5 - r8 contain hcall args,
2297 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002298 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002299hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00002300 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002301 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08002302 /* sc 1 from userspace - reflect to guest syscall */
2303 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002304 clrrdi r3,r3,2
2305 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002306 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10002307 /* See if this hcall is enabled for in-kernel handling */
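	/*
	 * Roughly, in C terms (an illustrative sketch only):
	 *	if (!test_bit(hcall_nr / 4, kvm->arch.enabled_hcalls))
	 *		exit to the host;
	 * The instructions below compute the dword index ((r3 / 4) >> 6)
	 * and bit number ((r3 / 4) & 0x3f) by hand.
	 */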
2308 ld r4, VCPU_KVM(r9)
2309 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2310 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2311 add r4, r4, r0
2312 ld r0, KVM_ENABLED_HCALLS(r4)
2313 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2314 srd r0, r0, r4
2315 andi. r0, r0, 1
2316 beq guest_exit_cont
2317 /* Get pointer to handler, if any, and call it */
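	/*
	 * hcall_real_table holds 32-bit offsets from the start of the
	 * table to each real-mode handler.  Entries are 4 bytes and hcall
	 * numbers are multiples of 4, so the hcall number itself is the
	 * byte offset into the table; a zero entry means there is no
	 * real-mode handler and we exit to the host.
	 */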
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002318 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10002319 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002320 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002321 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10002322 add r12,r3,r4
2323 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002324 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002325 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002326 bctrl
2327 cmpdi r3,H_TOO_HARD
2328 beq hcall_real_fallback
2329 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00002330 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002331 ld r10,VCPU_PC(r4)
2332 ld r11,VCPU_MSR(r4)
2333 b fast_guest_return
2334
Liu Ping Fan27025a62013-11-19 14:12:48 +08002335sc_1_fast_return:
2336 mtspr SPRN_SRR0,r10
2337 mtspr SPRN_SRR1,r11
2338 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11002339 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08002340 mr r4,r9
2341 b fast_guest_return
2342
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002343 /* We've attempted a real mode hcall, but it has been punted back
2344 * to userspace. We need to restore some clobbered volatiles
2345 * before resuming the pass-it-to-qemu path */
2346hcall_real_fallback:
2347 li r12,BOOK3S_INTERRUPT_SYSCALL
2348 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002349
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002350 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002351
2352 .globl hcall_real_table
2353hcall_real_table:
2354 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002355 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2356 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2357 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10002358 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2359 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002360 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2361 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002362 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002363 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002364 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002365 .long 0 /* 0x2c */
2366 .long 0 /* 0x30 */
2367 .long 0 /* 0x34 */
2368 .long 0 /* 0x38 */
2369 .long 0 /* 0x3c */
2370 .long 0 /* 0x40 */
2371 .long 0 /* 0x44 */
2372 .long 0 /* 0x48 */
2373 .long 0 /* 0x4c */
2374 .long 0 /* 0x50 */
2375 .long 0 /* 0x54 */
2376 .long 0 /* 0x58 */
2377 .long 0 /* 0x5c */
2378 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002379#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002380 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2381 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2382 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002383 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002384 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002385#else
2386 .long 0 /* 0x64 - H_EOI */
2387 .long 0 /* 0x68 - H_CPPR */
2388 .long 0 /* 0x6c - H_IPI */
2389 .long 0 /* 0x70 - H_IPOLL */
2390 .long 0 /* 0x74 - H_XIRR */
2391#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002392 .long 0 /* 0x78 */
2393 .long 0 /* 0x7c */
2394 .long 0 /* 0x80 */
2395 .long 0 /* 0x84 */
2396 .long 0 /* 0x88 */
2397 .long 0 /* 0x8c */
2398 .long 0 /* 0x90 */
2399 .long 0 /* 0x94 */
2400 .long 0 /* 0x98 */
2401 .long 0 /* 0x9c */
2402 .long 0 /* 0xa0 */
2403 .long 0 /* 0xa4 */
2404 .long 0 /* 0xa8 */
2405 .long 0 /* 0xac */
2406 .long 0 /* 0xb0 */
2407 .long 0 /* 0xb4 */
2408 .long 0 /* 0xb8 */
2409 .long 0 /* 0xbc */
2410 .long 0 /* 0xc0 */
2411 .long 0 /* 0xc4 */
2412 .long 0 /* 0xc8 */
2413 .long 0 /* 0xcc */
2414 .long 0 /* 0xd0 */
2415 .long 0 /* 0xd4 */
2416 .long 0 /* 0xd8 */
2417 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002418 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11002419 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002420 .long 0 /* 0xe8 */
2421 .long 0 /* 0xec */
2422 .long 0 /* 0xf0 */
2423 .long 0 /* 0xf4 */
2424 .long 0 /* 0xf8 */
2425 .long 0 /* 0xfc */
2426 .long 0 /* 0x100 */
2427 .long 0 /* 0x104 */
2428 .long 0 /* 0x108 */
2429 .long 0 /* 0x10c */
2430 .long 0 /* 0x110 */
2431 .long 0 /* 0x114 */
2432 .long 0 /* 0x118 */
2433 .long 0 /* 0x11c */
2434 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002435 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11002436 .long 0 /* 0x128 */
2437 .long 0 /* 0x12c */
2438 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002439 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002440 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11002441 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11002442 .long 0 /* 0x140 */
2443 .long 0 /* 0x144 */
2444 .long 0 /* 0x148 */
2445 .long 0 /* 0x14c */
2446 .long 0 /* 0x150 */
2447 .long 0 /* 0x154 */
2448 .long 0 /* 0x158 */
2449 .long 0 /* 0x15c */
2450 .long 0 /* 0x160 */
2451 .long 0 /* 0x164 */
2452 .long 0 /* 0x168 */
2453 .long 0 /* 0x16c */
2454 .long 0 /* 0x170 */
2455 .long 0 /* 0x174 */
2456 .long 0 /* 0x178 */
2457 .long 0 /* 0x17c */
2458 .long 0 /* 0x180 */
2459 .long 0 /* 0x184 */
2460 .long 0 /* 0x188 */
2461 .long 0 /* 0x18c */
2462 .long 0 /* 0x190 */
2463 .long 0 /* 0x194 */
2464 .long 0 /* 0x198 */
2465 .long 0 /* 0x19c */
2466 .long 0 /* 0x1a0 */
2467 .long 0 /* 0x1a4 */
2468 .long 0 /* 0x1a8 */
2469 .long 0 /* 0x1ac */
2470 .long 0 /* 0x1b0 */
2471 .long 0 /* 0x1b4 */
2472 .long 0 /* 0x1b8 */
2473 .long 0 /* 0x1bc */
2474 .long 0 /* 0x1c0 */
2475 .long 0 /* 0x1c4 */
2476 .long 0 /* 0x1c8 */
2477 .long 0 /* 0x1cc */
2478 .long 0 /* 0x1d0 */
2479 .long 0 /* 0x1d4 */
2480 .long 0 /* 0x1d8 */
2481 .long 0 /* 0x1dc */
2482 .long 0 /* 0x1e0 */
2483 .long 0 /* 0x1e4 */
2484 .long 0 /* 0x1e8 */
2485 .long 0 /* 0x1ec */
2486 .long 0 /* 0x1f0 */
2487 .long 0 /* 0x1f4 */
2488 .long 0 /* 0x1f8 */
2489 .long 0 /* 0x1fc */
2490 .long 0 /* 0x200 */
2491 .long 0 /* 0x204 */
2492 .long 0 /* 0x208 */
2493 .long 0 /* 0x20c */
2494 .long 0 /* 0x210 */
2495 .long 0 /* 0x214 */
2496 .long 0 /* 0x218 */
2497 .long 0 /* 0x21c */
2498 .long 0 /* 0x220 */
2499 .long 0 /* 0x224 */
2500 .long 0 /* 0x228 */
2501 .long 0 /* 0x22c */
2502 .long 0 /* 0x230 */
2503 .long 0 /* 0x234 */
2504 .long 0 /* 0x238 */
2505 .long 0 /* 0x23c */
2506 .long 0 /* 0x240 */
2507 .long 0 /* 0x244 */
2508 .long 0 /* 0x248 */
2509 .long 0 /* 0x24c */
2510 .long 0 /* 0x250 */
2511 .long 0 /* 0x254 */
2512 .long 0 /* 0x258 */
2513 .long 0 /* 0x25c */
2514 .long 0 /* 0x260 */
2515 .long 0 /* 0x264 */
2516 .long 0 /* 0x268 */
2517 .long 0 /* 0x26c */
2518 .long 0 /* 0x270 */
2519 .long 0 /* 0x274 */
2520 .long 0 /* 0x278 */
2521 .long 0 /* 0x27c */
2522 .long 0 /* 0x280 */
2523 .long 0 /* 0x284 */
2524 .long 0 /* 0x288 */
2525 .long 0 /* 0x28c */
2526 .long 0 /* 0x290 */
2527 .long 0 /* 0x294 */
2528 .long 0 /* 0x298 */
2529 .long 0 /* 0x29c */
2530 .long 0 /* 0x2a0 */
2531 .long 0 /* 0x2a4 */
2532 .long 0 /* 0x2a8 */
2533 .long 0 /* 0x2ac */
2534 .long 0 /* 0x2b0 */
2535 .long 0 /* 0x2b4 */
2536 .long 0 /* 0x2b8 */
2537 .long 0 /* 0x2bc */
2538 .long 0 /* 0x2c0 */
2539 .long 0 /* 0x2c4 */
2540 .long 0 /* 0x2c8 */
2541 .long 0 /* 0x2cc */
2542 .long 0 /* 0x2d0 */
2543 .long 0 /* 0x2d4 */
2544 .long 0 /* 0x2d8 */
2545 .long 0 /* 0x2dc */
2546 .long 0 /* 0x2e0 */
2547 .long 0 /* 0x2e4 */
2548 .long 0 /* 0x2e8 */
2549 .long 0 /* 0x2ec */
2550 .long 0 /* 0x2f0 */
2551 .long 0 /* 0x2f4 */
2552 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002553#ifdef CONFIG_KVM_XICS
2554 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2555#else
 2556 .long 0 /* 0x2fc - H_XIRR_X */
2557#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11002558 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002559 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002560hcall_real_table_end:
2561
Paul Mackerras8563bf52014-01-08 21:25:29 +11002562_GLOBAL(kvmppc_h_set_xdabr)
2563 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2564 beq 6f
2565 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2566 andc. r0, r5, r0
2567 beq 3f
25686: li r3, H_PARAMETER
2569 blr
2570
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002571_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002572 li r5, DABRX_USER | DABRX_KERNEL
25733:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002574BEGIN_FTR_SECTION
2575 b 2f
2576END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002577 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002578 stw r5, VCPU_DABRX(r3)
2579 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002580 /* Work around P7 bug where DABR can get corrupted on mtspr */
25811: mtspr SPRN_DABR,r4
2582 mfspr r5, SPRN_DABR
2583 cmpd r4, r5
2584 bne 1b
2585 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002586 li r3,0
2587 blr
2588
Michael Neulinge8ebedb2018-03-27 15:37:21 +110025892:
2590BEGIN_FTR_SECTION
2591 /* POWER9 with disabled DAWR */
Aneesh Kumar K.Vca9a16c2018-03-30 17:27:24 +05302592 li r3, H_HARDWARE
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002593 blr
2594END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002595 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002596 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002597 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002598 clrrdi r4, r4, 3
2599 std r4, VCPU_DAWR(r3)
2600 std r5, VCPU_DAWRX(r3)
2601 mtspr SPRN_DAWR, r4
2602 mtspr SPRN_DAWRX, r5
2603 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002604 blr
2605
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002606_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002607 ori r11,r11,MSR_EE
2608 std r11,VCPU_MSR(r3)
2609 li r0,1
2610 stb r0,VCPU_CEDED(r3)
2611 sync /* order setting ceded vs. testing prodded */
2612 lbz r5,VCPU_PRODDED(r3)
2613 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002614 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002615 li r12,0 /* set trap to 0 to say hcall is handled */
2616 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002617 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002618 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002619
2620 /*
2621 * Set our bit in the bitmask of napping threads unless all the
2622 * other threads are already napping, in which case we send this
2623 * up to the host.
2624 */
2625 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002626 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002627 lwz r8,VCORE_ENTRY_EXIT(r5)
2628 clrldi r8,r8,56
2629 li r0,1
2630 sld r0,r0,r6
2631 addi r6,r5,VCORE_NAPPING_THREADS
263231: lwarx r4,0,r6
2633 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002634 cmpw r4,r8
2635 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002636 stwcx. r4,0,r6
2637 bne 31b
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002638 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002639 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002640 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002641 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002642 lwz r7,VCORE_ENTRY_EXIT(r5)
2643 cmpwi r7,0x100
2644 bge 33f /* another thread already exiting */
2645
2646/*
2647 * Although not specifically required by the architecture, POWER7
2648 * preserves the following registers in nap mode, even if an SMT mode
2649 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2650 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2651 */
2652 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002653 std r14, VCPU_GPR(R14)(r3)
2654 std r15, VCPU_GPR(R15)(r3)
2655 std r16, VCPU_GPR(R16)(r3)
2656 std r17, VCPU_GPR(R17)(r3)
2657 std r18, VCPU_GPR(R18)(r3)
2658 std r19, VCPU_GPR(R19)(r3)
2659 std r20, VCPU_GPR(R20)(r3)
2660 std r21, VCPU_GPR(R21)(r3)
2661 std r22, VCPU_GPR(R22)(r3)
2662 std r23, VCPU_GPR(R23)(r3)
2663 std r24, VCPU_GPR(R24)(r3)
2664 std r25, VCPU_GPR(R25)(r3)
2665 std r26, VCPU_GPR(R26)(r3)
2666 std r27, VCPU_GPR(R27)(r3)
2667 std r28, VCPU_GPR(R28)(r3)
2668 std r29, VCPU_GPR(R29)(r3)
2669 std r30, VCPU_GPR(R30)(r3)
2670 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002671
2672 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002673 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002674
Paul Mackerras93d17392016-06-22 15:52:55 +10002675#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002676/*
2677 * Branch around the call if both CPU_FTR_TM and
2678 * CPU_FTR_P9_TM_HV_ASSIST are off.
2679 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002680BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002681 b 91f
2682END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002683 /*
2684 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2685 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002686 ld r9, HSTATE_KVM_VCPU(r13)
2687 bl kvmppc_save_tm
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100268891:
Paul Mackerras93d17392016-06-22 15:52:55 +10002689#endif
2690
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002691 /*
2692 * Set DEC to the smaller of DEC and HDEC, so that we wake
2693 * no later than the end of our timeslice (HDEC interrupts
2694 * don't wake us from nap).
2695 */
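	/*
	 * In effect (a rough sketch, ignoring the large-decrementer and
	 * sign-extension handling just below):
	 *
	 *	if (HDEC < DEC)
	 *		mtspr(SPRN_DEC, HDEC);	// wake no later than HDEC expiry
	 *	vcpu->dec_expires = DEC + tb - vcore->tb_offset_applied;
	 */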
2696 mfspr r3, SPRN_DEC
2697 mfspr r4, SPRN_HDEC
2698 mftb r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10002699BEGIN_FTR_SECTION
2700 /* On P9 check whether the guest has large decrementer mode enabled */
2701 ld r6, HSTATE_KVM_VCORE(r13)
2702 ld r6, VCORE_LPCR(r6)
2703 andis. r6, r6, LPCR_LD@h
2704 bne 68f
2705END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras2f272462017-05-22 16:25:14 +10002706 extsw r3, r3
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000270768: EXTEND_HDEC(r4)
Paul Mackerras2f272462017-05-22 16:25:14 +10002708 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002709 ble 67f
2710 mtspr SPRN_DEC, r4
271167:
2712 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002713 add r3, r3, r5
2714 ld r4, HSTATE_KVM_VCPU(r13)
2715 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002716 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002717 subf r3, r6, r3 /* convert to host TB value */
2718 std r3, VCPU_DEC_EXPIRES(r4)
2719
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002720#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2721 ld r4, HSTATE_KVM_VCPU(r13)
2722 addi r3, r4, VCPU_TB_CEDE
2723 bl kvmhv_accumulate_time
2724#endif
2725
Paul Mackerrasccc07772015-03-28 14:21:07 +11002726 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2727
Paul Mackerras19ccb762011-07-23 17:42:46 +10002728 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002729	 * Take a nap until a decrementer, external or doorbell interrupt

Paul Mackerrasccc07772015-03-28 14:21:07 +11002730 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002731 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002732 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002733 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002734kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002735 mfspr r0, SPRN_CTRLF
2736 clrrdi r0, r0, 1
2737 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302738
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002739 li r0,1
2740 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002741 mfspr r5,SPRN_LPCR
2742 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002743BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002744 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002745 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002746END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002747
2748kvm_nap_sequence: /* desired LPCR value in r5 */
2749BEGIN_FTR_SECTION
2750 /*
2751 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2752 * enable state loss = 1 (allow SMT mode switch)
2753 * requested level = 0 (just stop dispatching)
2754 */
2755 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2756 mtspr SPRN_PSSCR, r3
2757 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2758 li r4, LPCR_PECE_HVEE@higher
2759 sldi r4, r4, 32
2760 or r5, r5, r4
2761END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002762 mtspr SPRN_LPCR,r5
2763 isync
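	/*
	 * The dummy store / ptesync / reload / compare loop below follows the
	 * recommended sequence for entering a power-saving state: the compare
	 * depends on the reloaded value, so all preceding stores must have
	 * been performed before the nap/stop instruction executes.
	 */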
2764 li r0, 0
2765 std r0, HSTATE_SCRATCH0(r13)
2766 ptesync
2767 ld r0, HSTATE_SCRATCH0(r13)
27681: cmpd r0, r0
2769 bne 1b
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002770BEGIN_FTR_SECTION
Paul Mackerras19ccb762011-07-23 17:42:46 +10002771 nap
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002772FTR_SECTION_ELSE
2773 PPC_STOP
2774ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002775 b .
2776
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100277733: mr r4, r3
2778 li r3, 0
2779 li r12, 0
2780 b 34f
2781
Paul Mackerras19ccb762011-07-23 17:42:46 +10002782kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002783 /* get vcpu pointer */
2784 ld r4, HSTATE_KVM_VCPU(r13)
2785
Paul Mackerras19ccb762011-07-23 17:42:46 +10002786 /* Woken by external or decrementer interrupt */
2787 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002788
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002789#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2790 addi r3, r4, VCPU_TB_RMINTR
2791 bl kvmhv_accumulate_time
2792#endif
2793
Paul Mackerras93d17392016-06-22 15:52:55 +10002794#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002795/*
2796 * Branch around the call if both CPU_FTR_TM and
2797 * CPU_FTR_P9_TM_HV_ASSIST are off.
2798 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002799BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002800 b 91f
2801END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002802 /*
2803 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2804 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002805 bl kvmppc_restore_tm
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100280691:
Paul Mackerras93d17392016-06-22 15:52:55 +10002807#endif
2808
Paul Mackerras19ccb762011-07-23 17:42:46 +10002809 /* load up FP state */
2810 bl kvmppc_load_fp
2811
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002812 /* Restore guest decrementer */
2813 ld r3, VCPU_DEC_EXPIRES(r4)
2814 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002815 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002816 add r3, r3, r6 /* convert host TB to guest TB value */
2817 mftb r7
2818 subf r3, r7, r3
2819 mtspr SPRN_DEC, r3
2820
Paul Mackerras19ccb762011-07-23 17:42:46 +10002821 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002822 ld r14, VCPU_GPR(R14)(r4)
2823 ld r15, VCPU_GPR(R15)(r4)
2824 ld r16, VCPU_GPR(R16)(r4)
2825 ld r17, VCPU_GPR(R17)(r4)
2826 ld r18, VCPU_GPR(R18)(r4)
2827 ld r19, VCPU_GPR(R19)(r4)
2828 ld r20, VCPU_GPR(R20)(r4)
2829 ld r21, VCPU_GPR(R21)(r4)
2830 ld r22, VCPU_GPR(R22)(r4)
2831 ld r23, VCPU_GPR(R23)(r4)
2832 ld r24, VCPU_GPR(R24)(r4)
2833 ld r25, VCPU_GPR(R25)(r4)
2834 ld r26, VCPU_GPR(R26)(r4)
2835 ld r27, VCPU_GPR(R27)(r4)
2836 ld r28, VCPU_GPR(R28)(r4)
2837 ld r29, VCPU_GPR(R29)(r4)
2838 ld r30, VCPU_GPR(R30)(r4)
2839 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002840
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002841 /* Check the wake reason in SRR1 to see why we got here */
2842 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002843
Suresh Warrier37f55d32016-08-19 15:35:46 +10002844 /*
2845 * Restore volatile registers since we could have called a
2846 * C routine in kvmppc_check_wake_reason
2847 * r4 = VCPU
2848 * r3 tells us whether we need to return to host or not
	2849	 * WARNING: r3 is checked further down;
	2850	 * do not modify it until that check is done.
2851 */
2852 ld r4, HSTATE_KVM_VCPU(r13)
2853
Paul Mackerras19ccb762011-07-23 17:42:46 +10002854 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100285534: ld r5,HSTATE_KVM_VCORE(r13)
2856 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002857 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002858 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002859 addi r6,r5,VCORE_NAPPING_THREADS
286032: lwarx r7,0,r6
2861 andc r7,r7,r0
2862 stwcx. r7,0,r6
2863 bne 32b
2864 li r0,0
2865 stb r0,HSTATE_NAPPING(r13)
2866
Suresh Warrier37f55d32016-08-19 15:35:46 +10002867 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002868 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002869 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002870 cmpdi r3, 0
2871 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002872
Paul Mackerras19ccb762011-07-23 17:42:46 +10002873 /* see if any other thread is already exiting */
2874 lwz r0,VCORE_ENTRY_EXIT(r5)
2875 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002876 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002877
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002878 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002879
2880 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002881kvm_cede_prodded:
2882 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002883 stb r0,VCPU_PRODDED(r3)
2884 sync /* order testing prodded vs. clearing ceded */
2885 stb r0,VCPU_CEDED(r3)
2886 li r3,H_SUCCESS
2887 blr
2888
2889 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002890kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002891 ld r9, HSTATE_KVM_VCPU(r13)
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002892#ifdef CONFIG_KVM_XICS
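	/*
	 * Note on the block below: before handing control back to the host we
	 * re-arm the vcpu's XIVE escalation interrupt by loading from its ESB
	 * page at the "set PQ to 00" offset -- a normal load if translation is
	 * on, or a cache-inhibited load of the real address in real mode --
	 * and then record that escalation is enabled again.
	 */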
2893 /* Abort if we still have a pending escalation */
2894 lbz r5, VCPU_XIVE_ESC_ON(r9)
2895 cmpwi r5, 0
2896 beq 1f
2897 li r0, 0
2898 stb r0, VCPU_CEDED(r9)
28991: /* Enable XIVE escalation */
2900 li r5, XIVE_ESB_SET_PQ_00
2901 mfmsr r0
2902 andi. r0, r0, MSR_DR /* in real mode? */
2903 beq 1f
2904 ld r10, VCPU_XIVE_ESC_VADDR(r9)
2905 cmpdi r10, 0
2906 beq 3f
2907 ldx r0, r10, r5
2908 b 2f
29091: ld r10, VCPU_XIVE_ESC_RADDR(r9)
2910 cmpdi r10, 0
2911 beq 3f
2912 ldcix r0, r10, r5
29132: sync
2914 li r0, 1
2915 stb r0, VCPU_XIVE_ESC_ON(r9)
2916#endif /* CONFIG_KVM_XICS */
29173: b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002918
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002919 /* Try to handle a machine check in real mode */
2920machine_check_realmode:
2921 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002922 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002923 nop
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002924 ld r9, HSTATE_KVM_VCPU(r13)
2925 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302926 /*
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302927	 * For a guest that is FWNMI capable, deliver all MCE errors
	2928	 * (handled or unhandled) by exiting the guest with the KVM_EXIT_NMI
	2929	 * exit reason. This approach injects machine check errors into the
	2930	 * guest address space with additional information in the form of an
	2931	 * RTAS event, enabling the guest kernel to handle such errors
	2932	 * suitably.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302933 *
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302934	 * For a guest that is not FWNMI capable (old QEMU), fall back
	2935	 * to the old behaviour for backward compatibility:
2936 * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either
2937 * through machine check interrupt (set HSRR0 to 0x200).
	2938	 * For handled (non-fatal) errors, just go back to guest execution
	2939	 * with the current HSRR0.
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302940	 * If we receive a machine check with MSR(RI=0), deliver it to the
	2941	 * guest as a machine check, causing the guest to crash.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302942 */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302943 ld r11, VCPU_MSR(r9)
Paul Mackerras1c9e3d52015-11-12 16:43:48 +11002944 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2945 bne mc_cont /* if so, exit to host */
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302946 /* Check if guest is capable of handling NMI exit */
2947 ld r10, VCPU_KVM(r9)
2948 lbz r10, KVM_FWNMI(r10)
2949 cmpdi r10, 1 /* FWNMI capable? */
2950 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
2951
2952 /* if not, fall through for backward compatibility. */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302953 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2954 beq 1f /* Deliver a machine check to guest */
2955 ld r10, VCPU_PC(r9)
2956 cmpdi r3, 0 /* Did we handle MCE ? */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302957 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002958 /* If not, deliver a machine check. SRR0/1 are already set */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +053029591: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Michael Neulinge4e38122014-03-25 10:47:02 +11002960 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053029612: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002962
Paul Mackerrasde56a942011-06-29 00:21:34 +00002963/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002964 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002965 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002966 * 0 if nothing needs to be done
2967 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11002968 * -1 if there was a guest wakeup (IPI or msgsnd)
Suresh Warriere3c13e52016-08-19 15:35:51 +10002969 * -2 if we handled a PCI passthrough interrupt (returned by
2970 * kvmppc_read_intr only)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002971 *
2972 * Also sets r12 to the interrupt vector for any interrupt that needs
2973 * to be handled now by the host (0x500 for external interrupt), or zero.
Suresh Warrier37f55d32016-08-19 15:35:46 +10002974 * Modifies all volatile registers (since it may call a C function).
2975 * This routine calls kvmppc_read_intr, a C function, if an external
2976 * interrupt is pending.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002977 */
2978kvmppc_check_wake_reason:
2979 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002980BEGIN_FTR_SECTION
2981 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2982FTR_SECTION_ELSE
2983 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2984ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2985 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10002986 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002987 li r3, 0
2988 li r12, 0
2989 cmpwi r6, 6 /* was it the decrementer? */
2990 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002991BEGIN_FTR_SECTION
2992 cmpwi r6, 5 /* privileged doorbell? */
2993 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002994 cmpwi r6, 3 /* hypervisor doorbell? */
2995 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002996END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302997 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2998 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002999 li r3, 1 /* anything else, return 1 */
30000: blr
3001
Paul Mackerras5d00f662014-01-08 21:25:28 +11003002 /* hypervisor doorbell */
30033: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05303004
3005 /*
3006 * Clear the doorbell as we will invoke the handler
3007 * explicitly in the guest exit path.
3008 */
3009 lis r6, (PPC_DBELL_SERVER << (63-36))@h
3010 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11003011 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11003012 li r3, 1
Nicholas Piggin2cde3712017-10-10 20:18:28 +10003013BEGIN_FTR_SECTION
3014 PPC_MSGSYNC
3015 lwsync
3016END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11003017 lbz r0, HSTATE_HOST_IPI(r13)
3018 cmpwi r0, 0
3019 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05303020 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11003021 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11003022 blr
3023
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05303024 /* Woken up due to Hypervisor maintenance interrupt */
30254: li r12, BOOK3S_INTERRUPT_HMI
3026 li r3, 1
3027 blr
3028
Suresh Warrier37f55d32016-08-19 15:35:46 +10003029 /* external interrupt - create a stack frame so we can call C */
30307: mflr r0
3031 std r0, PPC_LR_STKOFF(r1)
3032 stdu r1, -PPC_MIN_STKFRM(r1)
3033 bl kvmppc_read_intr
3034 nop
3035 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10003036 cmpdi r3, 1
3037 ble 1f
3038
3039 /*
3040 * Return code of 2 means PCI passthrough interrupt, but
	3041	 * we need to return to the host to complete handling the
	3042	 * interrupt. The trap reason is expected in r12 by the guest
	3043	 * exit code.
3044 */
3045 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
30461:
Suresh Warrier37f55d32016-08-19 15:35:46 +10003047 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
3048 addi r1, r1, PPC_MIN_STKFRM
3049 mtlr r0
3050 blr
Paul Mackerrasde56a942011-06-29 00:21:34 +00003051
3052/*
3053 * Save away FP, VMX and VSX registers.
3054 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003055 * N.B. r30 and r31 are volatile across this function,
3056 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003057 */
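/*
 * Flow: LR is stashed in r30 and the vcpu pointer in r31, MSR[FP] (plus
 * VEC/VSX where available) is enabled so the register file is accessible,
 * then store_fp_state / store_vr_state copy the FP/VSX and VMX state into
 * the vcpu struct, followed by VRSAVE.
 */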
Paul Mackerras595e4f72013-10-15 20:43:04 +11003058kvmppc_save_fp:
3059 mflr r30
3060 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00003061 mfmsr r5
3062 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00003063#ifdef CONFIG_ALTIVEC
3064BEGIN_FTR_SECTION
3065 oris r8,r8,MSR_VEC@h
3066END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3067#endif
3068#ifdef CONFIG_VSX
3069BEGIN_FTR_SECTION
3070 oris r8,r8,MSR_VSX@h
3071END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3072#endif
3073 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003074 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003075 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003076#ifdef CONFIG_ALTIVEC
3077BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003078 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003079 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003080END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3081#endif
3082 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11003083 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11003084 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00003085 blr
3086
3087/*
3088 * Load up FP, VMX and VSX registers
3089 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003090 * N.B. r30 and r31 are volatile across this function,
3091 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003092 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00003093kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11003094 mflr r30
3095 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00003096 mfmsr r9
3097 ori r8,r9,MSR_FP
3098#ifdef CONFIG_ALTIVEC
3099BEGIN_FTR_SECTION
3100 oris r8,r8,MSR_VEC@h
3101END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3102#endif
3103#ifdef CONFIG_VSX
3104BEGIN_FTR_SECTION
3105 oris r8,r8,MSR_VSX@h
3106END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3107#endif
3108 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003109 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003110 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003111#ifdef CONFIG_ALTIVEC
3112BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003113 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003114 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003115END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3116#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11003117 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00003118 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11003119 mtlr r30
3120 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00003121 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10003122
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003123#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3124/*
3125 * Save transactional state and TM-related registers.
3126 * Called with r9 pointing to the vcpu struct.
3127 * This can modify all checkpointed registers, but
3128 * restores r1, r2 and r9 (vcpu pointer) before exit.
3129 */
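/*
 * Outline of the flow below: turn MSR[TM] on; if the guest MSR shows a
 * transaction active or suspended, treclaim with failure cause
 * TM_CAUSE_KVM_RESCHED and save the checkpointed GPRs, FP/VMX state and
 * TM SPRs into the vcpu struct.  On POWER9 chips needing TM emulation
 * assistance, the fake-suspend case skips the checkpointed-state save
 * (the guest state was never overwritten) and keeps the previous TEXASR.
 * TFHAR and TFIAR are saved on every path out.
 */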
3130kvmppc_save_tm:
3131 mflr r0
3132 std r0, PPC_LR_STKOFF(r1)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003133 stdu r1, -PPC_MIN_STKFRM(r1)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003134
3135 /* Turn on TM. */
3136 mfmsr r8
3137 li r0, 1
3138 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
3139 mtmsrd r8
3140
3141 ld r5, VCPU_MSR(r9)
3142 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
3143 beq 1f /* TM not active in guest. */
3144
3145 std r1, HSTATE_HOST_R1(r13)
3146 li r3, TM_CAUSE_KVM_RESCHED
3147
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003148BEGIN_FTR_SECTION
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003149 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
3150 cmpwi r0, 0
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003151 beq 3f
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003152 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
3153 beq 4f
3154BEGIN_FTR_SECTION_NESTED(96)
3155 bl pnv_power9_force_smt4_catch
3156END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
3157 nop
Paul Mackerras681c6172018-03-21 21:32:03 +11003158 b 6f
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +110031593:
Paul Mackerras681c6172018-03-21 21:32:03 +11003160 /* Emulation of the treclaim instruction needs TEXASR before treclaim */
3161 mfspr r6, SPRN_TEXASR
3162 std r6, VCPU_ORIG_TEXASR(r9)
31636:
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003164END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
3165
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003166 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
3167 li r5, 0
3168 mtmsrd r5, 1
3169
3170 /* All GPRs are volatile at this point. */
3171 TRECLAIM(R3)
3172
3173 /* Temporarily store r13 and r9 so we have some regs to play with */
3174 SET_SCRATCH0(r13)
3175 GET_PACA(r13)
3176 std r9, PACATMSCRATCH(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003177
3178 /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
3179BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003180 lbz r9, HSTATE_FAKE_SUSPEND(r13)
3181 cmpwi r9, 0
3182 beq 2f
3183 /*
3184 * We were in fake suspend, so we are not going to save the
3185 * register state as the guest checkpointed state (since
	3186	 * we already have it), so we can now use any volatile GPR.
3187 */
3188 /* Reload stack pointer and TOC. */
3189 ld r1, HSTATE_HOST_R1(r13)
3190 ld r2, PACATOC(r13)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003191 /* Set MSR RI now we have r1 and r13 back. */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003192 li r5, MSR_RI
3193 mtmsrd r5, 1
3194 HMT_MEDIUM
3195 ld r6, HSTATE_DSCR(r13)
3196 mtspr SPRN_DSCR, r6
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003197BEGIN_FTR_SECTION_NESTED(96)
3198 bl pnv_power9_force_smt4_release
3199END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
3200 nop
3201
32024:
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003203 mfspr r3, SPRN_PSSCR
3204 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
3205 li r0, PSSCR_FAKE_SUSPEND
3206 andc r3, r3, r0
3207 mtspr SPRN_PSSCR, r3
3208 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras681c6172018-03-21 21:32:03 +11003209 /* Don't save TEXASR, use value from last exit in real suspend state */
3210 b 11f
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +110032112:
3212END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
3213
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003214 ld r9, HSTATE_KVM_VCPU(r13)
3215
3216 /* Get a few more GPRs free. */
3217 std r29, VCPU_GPRS_TM(29)(r9)
3218 std r30, VCPU_GPRS_TM(30)(r9)
3219 std r31, VCPU_GPRS_TM(31)(r9)
3220
	3221	 /* Save away PPR and DSCR soon so we don't run with user values. */
3222 mfspr r31, SPRN_PPR
3223 HMT_MEDIUM
3224 mfspr r30, SPRN_DSCR
3225 ld r29, HSTATE_DSCR(r13)
3226 mtspr SPRN_DSCR, r29
3227
3228 /* Save all but r9, r13 & r29-r31 */
3229 reg = 0
3230 .rept 29
3231 .if (reg != 9) && (reg != 13)
3232 std reg, VCPU_GPRS_TM(reg)(r9)
3233 .endif
3234 reg = reg + 1
3235 .endr
3236 /* ... now save r13 */
3237 GET_SCRATCH0(r4)
3238 std r4, VCPU_GPRS_TM(13)(r9)
3239 /* ... and save r9 */
3240 ld r4, PACATMSCRATCH(r13)
3241 std r4, VCPU_GPRS_TM(9)(r9)
3242
3243 /* Reload stack pointer and TOC. */
3244 ld r1, HSTATE_HOST_R1(r13)
3245 ld r2, PACATOC(r13)
3246
3247 /* Set MSR RI now we have r1 and r13 back. */
3248 li r5, MSR_RI
3249 mtmsrd r5, 1
3250
	3251	 /* Save away checkpointed SPRs. */
3252 std r31, VCPU_PPR_TM(r9)
3253 std r30, VCPU_DSCR_TM(r9)
3254 mflr r5
3255 mfcr r6
3256 mfctr r7
3257 mfspr r8, SPRN_AMR
3258 mfspr r10, SPRN_TAR
Paul Mackerras0d808df2016-11-07 15:09:58 +11003259 mfxer r11
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003260 std r5, VCPU_LR_TM(r9)
3261 stw r6, VCPU_CR_TM(r9)
3262 std r7, VCPU_CTR_TM(r9)
3263 std r8, VCPU_AMR_TM(r9)
3264 std r10, VCPU_TAR_TM(r9)
Paul Mackerras0d808df2016-11-07 15:09:58 +11003265 std r11, VCPU_XER_TM(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003266
3267 /* Restore r12 as trap number. */
3268 lwz r12, VCPU_TRAP(r9)
3269
3270 /* Save FP/VSX. */
3271 addi r3, r9, VCPU_FPRS_TM
3272 bl store_fp_state
3273 addi r3, r9, VCPU_VRS_TM
3274 bl store_vr_state
3275 mfspr r6, SPRN_VRSAVE
3276 stw r6, VCPU_VRSAVE_TM(r9)
32771:
3278 /*
3279 * We need to save these SPRs after the treclaim so that the software
3280 * error code is recorded correctly in the TEXASR. Also the user may
3281 * change these outside of a transaction, so they must always be
3282 * context switched.
3283 */
Paul Mackerras681c6172018-03-21 21:32:03 +11003284 mfspr r7, SPRN_TEXASR
3285 std r7, VCPU_TEXASR(r9)
328611:
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003287 mfspr r5, SPRN_TFHAR
3288 mfspr r6, SPRN_TFIAR
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003289 std r5, VCPU_TFHAR(r9)
3290 std r6, VCPU_TFIAR(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003291
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003292 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003293 ld r0, PPC_LR_STKOFF(r1)
3294 mtlr r0
3295 blr
3296
3297/*
3298 * Restore transactional state and TM-related registers.
3299 * Called with r4 pointing to the vcpu struct.
3300 * This potentially modifies all checkpointed registers.
3301 * It restores r1, r2, r4 from the PACA.
3302 */
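/*
 * Outline of the flow below: enable TM/FP/VEC/VSX, always restore
 * TFHAR/TFIAR/TEXASR, and if the guest MSR shows a transaction active or
 * suspended, load the checkpointed FP/VMX/GPR state and trechkpt.  On
 * POWER9 chips using TM emulation assistance, a fake-suspend state is set
 * up (or a rollback is emulated) instead of doing the trechkpt.
 */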
3303kvmppc_restore_tm:
3304 mflr r0
3305 std r0, PPC_LR_STKOFF(r1)
3306
3307 /* Turn on TM/FP/VSX/VMX so we can restore them. */
3308 mfmsr r5
3309 li r6, MSR_TM >> 32
3310 sldi r6, r6, 32
3311 or r5, r5, r6
3312 ori r5, r5, MSR_FP
3313 oris r5, r5, (MSR_VEC | MSR_VSX)@h
3314 mtmsrd r5
3315
3316 /*
3317 * The user may change these outside of a transaction, so they must
3318 * always be context switched.
3319 */
3320 ld r5, VCPU_TFHAR(r4)
3321 ld r6, VCPU_TFIAR(r4)
3322 ld r7, VCPU_TEXASR(r4)
3323 mtspr SPRN_TFHAR, r5
3324 mtspr SPRN_TFIAR, r6
3325 mtspr SPRN_TEXASR, r7
3326
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003327 li r0, 0
3328 stb r0, HSTATE_FAKE_SUSPEND(r13)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003329 ld r5, VCPU_MSR(r4)
3330 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
3331 beqlr /* TM not active in guest */
3332 std r1, HSTATE_HOST_R1(r13)
3333
	3334	 /* Make sure the failure summary is set; otherwise we'll take a program
	3335	 * check when we trechkpt. It's possible that it might not have been set
	3336	 * by a kvmppc_set_one_reg() call, but we shouldn't let that crash the
	3337	 * host.
3338 */
3339 oris r7, r7, (TEXASR_FS)@h
3340 mtspr SPRN_TEXASR, r7
3341
3342 /*
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003343 * If we are doing TM emulation for the guest on a POWER9 DD2,
3344 * then we don't actually do a trechkpt -- we either set up
3345 * fake-suspend mode, or emulate a TM rollback.
3346 */
3347BEGIN_FTR_SECTION
3348 b .Ldo_tm_fake_load
3349END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
3350
3351 /*
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003352 * We need to load up the checkpointed state for the guest.
3353 * We need to do this early as it will blow away any GPRs, VSRs and
3354 * some SPRs.
3355 */
3356
3357 mr r31, r4
3358 addi r3, r31, VCPU_FPRS_TM
3359 bl load_fp_state
3360 addi r3, r31, VCPU_VRS_TM
3361 bl load_vr_state
3362 mr r4, r31
3363 lwz r7, VCPU_VRSAVE_TM(r4)
3364 mtspr SPRN_VRSAVE, r7
3365
3366 ld r5, VCPU_LR_TM(r4)
3367 lwz r6, VCPU_CR_TM(r4)
3368 ld r7, VCPU_CTR_TM(r4)
3369 ld r8, VCPU_AMR_TM(r4)
3370 ld r9, VCPU_TAR_TM(r4)
Paul Mackerras0d808df2016-11-07 15:09:58 +11003371 ld r10, VCPU_XER_TM(r4)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003372 mtlr r5
3373 mtcr r6
3374 mtctr r7
3375 mtspr SPRN_AMR, r8
3376 mtspr SPRN_TAR, r9
Paul Mackerras0d808df2016-11-07 15:09:58 +11003377 mtxer r10
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003378
3379 /*
3380 * Load up PPR and DSCR values but don't put them in the actual SPRs
3381 * till the last moment to avoid running with userspace PPR and DSCR for
3382 * too long.
3383 */
3384 ld r29, VCPU_DSCR_TM(r4)
3385 ld r30, VCPU_PPR_TM(r4)
3386
3387 std r2, PACATMSCRATCH(r13) /* Save TOC */
3388
3389 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
3390 li r5, 0
3391 mtmsrd r5, 1
3392
3393 /* Load GPRs r0-r28 */
3394 reg = 0
3395 .rept 29
3396 ld reg, VCPU_GPRS_TM(reg)(r31)
3397 reg = reg + 1
3398 .endr
3399
3400 mtspr SPRN_DSCR, r29
3401 mtspr SPRN_PPR, r30
3402
3403 /* Load final GPRs */
3404 ld 29, VCPU_GPRS_TM(29)(r31)
3405 ld 30, VCPU_GPRS_TM(30)(r31)
3406 ld 31, VCPU_GPRS_TM(31)(r31)
3407
3408 /* TM checkpointed state is now setup. All GPRs are now volatile. */
3409 TRECHKPT
3410
3411 /* Now let's get back the state we need. */
3412 HMT_MEDIUM
3413 GET_PACA(r13)
3414 ld r29, HSTATE_DSCR(r13)
3415 mtspr SPRN_DSCR, r29
3416 ld r4, HSTATE_KVM_VCPU(r13)
3417 ld r1, HSTATE_HOST_R1(r13)
3418 ld r2, PACATMSCRATCH(r13)
3419
3420 /* Set the MSR RI since we have our registers back. */
3421 li r5, MSR_RI
3422 mtmsrd r5, 1
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +110034239:
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003424 ld r0, PPC_LR_STKOFF(r1)
3425 mtlr r0
3426 blr
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003427
3428.Ldo_tm_fake_load:
3429 cmpwi r5, 1 /* check for suspended state */
3430 bgt 10f
3431 stb r5, HSTATE_FAKE_SUSPEND(r13)
3432 b 9b /* and return */
343310: stdu r1, -PPC_MIN_STKFRM(r1)
3434 /* guest is in transactional state, so simulate rollback */
3435 mr r3, r4
3436 bl kvmhv_emulate_tm_rollback
3437 nop
3438 ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
3439 addi r1, r1, PPC_MIN_STKFRM
3440 b 9b
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003441#endif
3442
Paul Mackerras44a3add2013-10-04 21:45:04 +10003443/*
3444 * We come here if we get any exception or interrupt while we are
3445 * executing host real mode code while in guest MMU context.
Paul Mackerras857b99e2017-09-01 16:17:27 +10003446 * r12 is (CR << 32) | vector
3447 * r13 points to our PACA
3448 * r12 is saved in HSTATE_SCRATCH0(r13)
3449 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3450 * r9 is saved in HSTATE_SCRATCH2(r13)
3451 * r13 is saved in HSPRG1
3452 * cfar is saved in HSTATE_CFAR(r13)
3453 * ppr is saved in HSTATE_PPR(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10003454 */
3455kvmppc_bad_host_intr:
Paul Mackerras857b99e2017-09-01 16:17:27 +10003456 /*
3457 * Switch to the emergency stack, but start half-way down in
3458 * case we were already on it.
3459 */
3460 mr r9, r1
3461 std r1, PACAR1(r13)
3462 ld r1, PACAEMERGSP(r13)
3463 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3464 std r9, 0(r1)
3465 std r0, GPR0(r1)
3466 std r9, GPR1(r1)
3467 std r2, GPR2(r1)
3468 SAVE_4GPRS(3, r1)
3469 SAVE_2GPRS(7, r1)
3470 srdi r0, r12, 32
3471 clrldi r12, r12, 32
3472 std r0, _CCR(r1)
3473 std r12, _TRAP(r1)
3474 andi. r0, r12, 2
3475 beq 1f
3476 mfspr r3, SPRN_HSRR0
3477 mfspr r4, SPRN_HSRR1
3478 mfspr r5, SPRN_HDAR
3479 mfspr r6, SPRN_HDSISR
3480 b 2f
34811: mfspr r3, SPRN_SRR0
3482 mfspr r4, SPRN_SRR1
3483 mfspr r5, SPRN_DAR
3484 mfspr r6, SPRN_DSISR
34852: std r3, _NIP(r1)
3486 std r4, _MSR(r1)
3487 std r5, _DAR(r1)
3488 std r6, _DSISR(r1)
3489 ld r9, HSTATE_SCRATCH2(r13)
3490 ld r12, HSTATE_SCRATCH0(r13)
3491 GET_SCRATCH0(r0)
3492 SAVE_4GPRS(9, r1)
3493 std r0, GPR13(r1)
3494 SAVE_NVGPRS(r1)
3495 ld r5, HSTATE_CFAR(r13)
3496 std r5, ORIG_GPR3(r1)
3497 mflr r3
3498#ifdef CONFIG_RELOCATABLE
3499 ld r4, HSTATE_SCRATCH1(r13)
3500#else
3501 mfctr r4
3502#endif
3503 mfxer r5
Madhavan Srinivasan4e26bc42017-12-20 09:25:50 +05303504 lbz r6, PACAIRQSOFTMASK(r13)
Paul Mackerras857b99e2017-09-01 16:17:27 +10003505 std r3, _LINK(r1)
3506 std r4, _CTR(r1)
3507 std r5, _XER(r1)
3508 std r6, SOFTE(r1)
3509 ld r2, PACATOC(r13)
3510 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3511 std r3, STACK_FRAME_OVERHEAD-16(r1)
3512
3513 /*
3514 * On POWER9 do a minimal restore of the MMU and call C code,
3515 * which will print a message and panic.
3516 * XXX On POWER7 and POWER8, we just spin here since we don't
3517 * know what the other threads are doing (and we don't want to
3518 * coordinate with them) - but at least we now have register state
3519 * in memory that we might be able to look at from another CPU.
3520 */
3521BEGIN_FTR_SECTION
Paul Mackerras44a3add2013-10-04 21:45:04 +10003522 b .
Paul Mackerras857b99e2017-09-01 16:17:27 +10003523END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3524 ld r9, HSTATE_KVM_VCPU(r13)
3525 ld r10, VCPU_KVM(r9)
3526
3527 li r0, 0
3528 mtspr SPRN_AMR, r0
3529 mtspr SPRN_IAMR, r0
3530 mtspr SPRN_CIABR, r0
3531 mtspr SPRN_DAWRX, r0
3532
3533 /* Flush the ERAT on radix P9 DD1 guest exit */
3534BEGIN_FTR_SECTION
3535 PPC_INVALIDATE_ERAT
3536END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
3537
3538BEGIN_MMU_FTR_SECTION
3539 b 4f
3540END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3541
3542 slbmte r0, r0
3543 slbia
3544 ptesync
3545 ld r8, PACA_SLBSHADOWPTR(r13)
3546 .rept SLB_NUM_BOLTED
3547 li r3, SLBSHADOW_SAVEAREA
3548 LDX_BE r5, r8, r3
3549 addi r3, r3, 8
3550 LDX_BE r6, r8, r3
3551 andis. r7, r5, SLB_ESID_V@h
3552 beq 3f
3553 slbmte r6, r5
35543: addi r8, r8, 16
3555 .endr
3556
35574: lwz r7, KVM_HOST_LPID(r10)
3558 mtspr SPRN_LPID, r7
3559 mtspr SPRN_PID, r0
3560 ld r8, KVM_HOST_LPCR(r10)
3561 mtspr SPRN_LPCR, r8
3562 isync
3563 li r0, KVM_GUEST_MODE_NONE
3564 stb r0, HSTATE_IN_GUEST(r13)
3565
3566 /*
3567 * Turn on the MMU and jump to C code
3568 */
3569 bcl 20, 31, .+4
35705: mflr r3
3571 addi r3, r3, 9f - 5b
3572 ld r4, PACAKMSR(r13)
3573 mtspr SPRN_SRR0, r3
3574 mtspr SPRN_SRR1, r4
Nicholas Piggin222f20f2018-01-10 03:07:15 +11003575 RFI_TO_KERNEL
Paul Mackerras857b99e2017-09-01 16:17:27 +100035769: addi r3, r1, STACK_FRAME_OVERHEAD
3577 bl kvmppc_bad_interrupt
3578 b 9b
Michael Neulinge4e38122014-03-25 10:47:02 +11003579
3580/*
3581 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3582 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3583 * r11 has the guest MSR value (in/out)
3584 * r9 has a vcpu pointer (in)
3585 * r0 is used as a scratch register
3586 */
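/*
 * Roughly, in C (a sketch; names are illustrative):
 *
 *	ts = MSR_TS(r11);		// 2 = transactional, 1 = suspended
 *	r11 = vcpu->arch.intr_msr;
 *	if (ts == 2)			// can't take an interrupt while
 *		ts = 1;			// transactional, so go to suspended
 *	r11 |= ts << MSR_TS_shift;	// other TS values pass through
 */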
3587kvmppc_msr_interrupt:
3588 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3589 cmpwi r0, 2 /* Check if we are in transactional state.. */
3590 ld r11, VCPU_INTR_MSR(r9)
3591 bne 1f
3592 /* ... if transactional, change to suspended */
3593 li r0, 1
35941: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3595 blr
Paul Mackerras9bc01a92014-05-26 19:48:40 +10003596
3597/*
3598 * This works around a hardware bug on POWER8E processors, where
3599 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3600 * performance monitor interrupt. Instead, when we need to have
3601 * an interrupt pending, we have to arrange for a counter to overflow.
3602 */
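/*
 * Mechanism: MMCR0 is programmed with the exception-enable and
 * freeze-on-enabled-condition bits, and PMC6 is loaded with 0x7fffffff so
 * that its very next count sets the most-significant bit; that overflow
 * condition delivers the performance monitor interrupt that writing
 * MMCR0[PMAO] alone fails to generate on the affected parts.
 */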
3603kvmppc_fix_pmao:
3604 li r3, 0
3605 mtspr SPRN_MMCR2, r3
3606 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3607 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3608 mtspr SPRN_MMCR0, r3
3609 lis r3, 0x7fff
3610 ori r3, r3, 0xffff
3611 mtspr SPRN_PMC6, r3
3612 isync
3613 blr
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003614
3615#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3616/*
3617 * Start timing an activity
3618 * r3 = pointer to time accumulation struct, r4 = vcpu
3619 */
3620kvmhv_start_timing:
3621 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003622 ld r6, VCORE_TB_OFFSET_APPL(r5)
3623 mftb r5
3624 subf r5, r6, r5 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003625 std r3, VCPU_CUR_ACTIVITY(r4)
3626 std r5, VCPU_ACTIVITY_START(r4)
3627 blr
3628
3629/*
3630 * Accumulate time to one activity and start another.
3631 * r3 = pointer to new time accumulation struct, r4 = vcpu
3632 */
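/*
 * Rough C sketch of the code below (illustrative names; times are kept
 * in host timebase units by subtracting the applied timebase offset):
 *
 *	now  = mftb() - vcore->tb_offset_applied;
 *	prev = vcpu->cur_activity;
 *	delta = now - vcpu->activity_start;
 *	vcpu->cur_activity   = new_acc;		// r3 on entry
 *	vcpu->activity_start = now;
 *	if (prev) {
 *		prev->seqcount++;		// odd: update in progress
 *		prev->total += delta;
 *		prev->min = min(prev->min, delta);	// first sample just stores delta
 *		prev->max = max(prev->max, delta);
 *		prev->seqcount++;		// even: update complete
 *	}
 */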
3633kvmhv_accumulate_time:
3634 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003635 ld r8, VCORE_TB_OFFSET_APPL(r5)
3636 ld r5, VCPU_CUR_ACTIVITY(r4)
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003637 ld r6, VCPU_ACTIVITY_START(r4)
3638 std r3, VCPU_CUR_ACTIVITY(r4)
3639 mftb r7
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003640 subf r7, r8, r7 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003641 std r7, VCPU_ACTIVITY_START(r4)
3642 cmpdi r5, 0
3643 beqlr
3644 subf r3, r6, r7
3645 ld r8, TAS_SEQCOUNT(r5)
3646 cmpdi r8, 0
3647 addi r8, r8, 1
3648 std r8, TAS_SEQCOUNT(r5)
3649 lwsync
3650 ld r7, TAS_TOTAL(r5)
3651 add r7, r7, r3
3652 std r7, TAS_TOTAL(r5)
3653 ld r6, TAS_MIN(r5)
3654 ld r7, TAS_MAX(r5)
3655 beq 3f
3656 cmpd r3, r6
3657 bge 1f
36583: std r3, TAS_MIN(r5)
36591: cmpd r3, r7
3660 ble 2f
3661 std r3, TAS_MAX(r5)
36622: lwsync
3663 addi r8, r8, 1
3664 std r8, TAS_SEQCOUNT(r5)
3665 blr
3666#endif