/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
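/*
 * Note: on POWER9 (CPU_FTR_ARCH_300) HDEC is a full 64-bit register, so no
 * sign extension is needed; on earlier CPUs it is a 32-bit signed quantity
 * and must be sign-extended before 64-bit compares such as cmpdi.
 */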

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
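/*
 * These offsets index the stack frame allocated by kvmppc_hv_entry
 * (stdu r1, -SFS(r1) below); they hold host SPR values saved on guest
 * entry and restored on the way back out.
 */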

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL
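	/*
	 * SRR0/SRR1 were loaded above, so the rfid turns instruction and
	 * data relocation off and continues at kvmppc_call_hv_entry in
	 * real mode.
	 */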

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
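	/* The lwarx/stwcx. loop above atomically sets this thread's bit in
	 * the vcore napping_threads mask, retrying if another thread updated
	 * the word concurrently. */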
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
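	/*
	 * Most register values (including r1) were lost across nap, so
	 * point r1 at this CPU's emergency stack to give the C calls below
	 * (e.g. kvmppc_check_wake_reason) a usable frame.
	 */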

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
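	/* The rlwimi above sets PECE0 and clears PECE1 in LPCR, so the
	 * napping thread will be woken by external interrupts (e.g. an IPI)
	 * but not by decrementer expiry. */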
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before napping.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b
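	/* The lwarx/stwcx. loop above atomically added our bit to the vcore
	 * entry map; a value of 0x100 or more means the exit map is already
	 * non-zero, i.e. some thread has started exiting, so we bail out
	 * via secondary_too_late. */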

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	/* Radix has already switched LPID and flushed core TLB */
	bne	cr7, 22f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. Hash has to be done in RM */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b
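	/* The loop above issued one tlbiel per TLB set (KVM_TLB_SETS
	 * iterations, stepping the set index by 0x1000 each time); the
	 * ldarx/stdcx. loop then atomically cleared this CPU's bit in
	 * kvm->arch.need_tlb_flush. */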

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
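	/* mtspr TBU40 updates only the upper 40 bits of the timebase; if the
	 * low 24 bits wrapped between the two mftb reads above, add 1 to the
	 * upper 40 bits to compensate. */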

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
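	/* At this point a hash guest has its SLB entries loaded; for a radix
	 * guest slb_max is 0 and the SLB stays empty. */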

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
BEGIN_FTR_SECTION
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
	or	r0, r0, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
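	/* The rldicl/rotldi pair rotates the MSR so that MSR_HV becomes the
	 * most-significant bit, masks it off, then rotates back, leaving the
	 * guest MSR with HV cleared; ME is then forced on so machine checks
	 * remain enabled while in the guest. */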

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
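	/* By convention, bit 1 of the trap number marks interrupts delivered
	 * via HSRR0/1; for those, r10/r11 were reloaded from the HSRRs above
	 * before being stored as the guest PC and MSR, and the low-order
	 * flag bits were cleared from r12. */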

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
Paul Mackerrasde56a942011-06-29 00:21:34 +000013742:
Paul Mackerras697d3892011-12-12 12:36:37 +00001375 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001376 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1377 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001378
Paul Mackerras66feed62015-03-28 14:21:12 +11001379 /* Hypervisor doorbell - exit only if host IPI flag set */
1380 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1381 bne 3f
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001382BEGIN_FTR_SECTION
1383 PPC_MSGSYNC
Nicholas Piggin2cde3712017-10-10 20:18:28 +10001384 lwsync
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001385END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11001386 lbz r0, HSTATE_HOST_IPI(r13)
Gautham R. Shenoy06554d92015-08-07 17:41:20 +05301387 cmpwi r0, 0
Paul Mackerras66feed62015-03-28 14:21:12 +11001388 beq 4f
1389 b guest_exit_cont
13903:
Paul Mackerras769377f2017-02-15 14:30:17 +11001391 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1392 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1393 bne 14f
1394 mfspr r3, SPRN_HFSCR
1395 std r3, VCPU_HFSCR(r9)
1396 b guest_exit_cont
139714:
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001398 /* External interrupt ? */
1399 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001400 bne+ guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001401
1402 /* External interrupt, first check for host_ipi. If this is
1403 * set, we know the host wants us out so let's do it now
1404 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001405 bl kvmppc_read_intr
Suresh Warrier37f55d32016-08-19 15:35:46 +10001406
1407 /*
1408 * Restore the active volatile registers after returning from
1409 * a C function.
1410 */
1411 ld r9, HSTATE_KVM_VCPU(r13)
1412 li r12, BOOK3S_INTERRUPT_EXTERNAL
1413
1414 /*
1415 * kvmppc_read_intr return codes:
1416 *
1417 * Exit to host (r3 > 0)
1418 * 1 An interrupt is pending that needs to be handled by the host
1419 * Exit guest and return to host by branching to guest_exit_cont
1420 *
Suresh Warrierf7af5202016-08-19 15:35:52 +10001421 * 2 Passthrough that needs completion in the host
1422 * Exit guest and return to host by branching to guest_exit_cont
1423 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1424 * to indicate to the host to complete handling the interrupt
1425 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001426	 * Before returning to the guest (the return values <= 0 below), we
1427	 * check whether any CPU is heading out to the host and, if so, we
1428	 * head out as well.
1429 *
1430 * Return to guest (r3 <= 0)
1431 * 0 No external interrupt is pending
1432 * -1 A guest wakeup IPI (which has now been cleared)
1433 * In either case, we return to guest to deliver any pending
1434 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001435 *
1436 * -2 A PCI passthrough external interrupt was handled
1437 * (interrupt was delivered directly to guest)
1438 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001439 */
1440
Suresh Warrierf7af5202016-08-19 15:35:52 +10001441 cmpdi r3, 1
1442 ble 1f
1443
1444 /* Return code = 2 */
1445 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1446 stw r12, VCPU_TRAP(r9)
1447 b guest_exit_cont
1448
14491: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001450 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001451 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001452
Suresh Warrier37f55d32016-08-19 15:35:46 +10001453 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110014544: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001455 lwz r0, VCORE_ENTRY_EXIT(r5)
1456 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001457 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001458 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001459
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001460guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerras43ff3f62018-01-11 14:31:43 +11001461 /* Save more register state */
1462 mfdar r6
1463 mfdsisr r7
1464 std r6, VCPU_DAR(r9)
1465 stw r7, VCPU_DSISR(r9)
1466 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1467 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1468 beq mc_cont
1469 std r6, VCPU_FAULT_DAR(r9)
1470 stw r7, VCPU_FAULT_DSISR(r9)
1471
1472 /* See if it is a machine check */
1473 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1474 beq machine_check_realmode
1475mc_cont:
1476#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1477 addi r3, r9, VCPU_TB_RMEXIT
1478 mr r4, r9
1479 bl kvmhv_accumulate_time
1480#endif
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001481#ifdef CONFIG_KVM_XICS
1482 /* We are exiting, pull the VP from the XIVE */
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001483 lbz r0, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001484 cmpwi cr0, r0, 0
1485 beq 1f
1486 li r7, TM_SPC_PULL_OS_CTX
1487 li r6, TM_QW1_OS
1488 mfmsr r0
Benjamin Herrenschmidt2662efd2018-01-12 13:37:14 +11001489 andi. r0, r0, MSR_DR /* in real mode? */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001490 beq 2f
1491 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1492 cmpldi cr0, r10, 0
1493 beq 1f
1494 /* First load to pull the context, we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001495 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001496 lwzx r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001497 /* Second load to recover the context state (Words 0 and 1) */
1498 ldx r11, r6, r10
1499 b 3f
15002: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1501 cmpldi cr0, r10, 0
1502 beq 1f
1503 /* First load to pull the context, we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001504 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001505 lwzcix r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001506 /* Second load to recover the context state (Words 0 and 1) */
1507 ldcix r11, r6, r10
15083: std r11, VCPU_XIVE_SAVED_STATE(r9)
1509 /* Fixup some of the state for the next load */
1510 li r10, 0
1511 li r0, 0xff
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001512 stb r10, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001513 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1514 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001515 eieio
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +100015161:
1517#endif /* CONFIG_KVM_XICS */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001518
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001519 /* For hash guest, read the guest SLB and save it away */
1520 ld r5, VCPU_KVM(r9)
1521 lbz r0, KVM_RADIX(r5)
1522 li r5, 0
1523 cmpwi r0, 0
1524 bne 3f /* for radix, save 0 entries */
1525 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1526 mtctr r0
1527 li r6,0
1528 addi r7,r9,VCPU_SLB
15291: slbmfee r8,r6
1530 andis. r0,r8,SLB_ESID_V@h
1531 beq 2f
1532 add r8,r8,r6 /* put index in */
1533 slbmfev r3,r6
1534 std r8,VCPU_SLB_E(r7)
1535 std r3,VCPU_SLB_V(r7)
1536 addi r7,r7,VCPU_SLB_SIZE
1537 addi r5,r5,1
15382: addi r6,r6,1
1539 bdnz 1b
1540 /* Finally clear out the SLB */
1541 li r0,0
1542 slbmte r0,r0
1543 slbia
1544 ptesync
15453: stw r5,VCPU_SLB_MAX(r9)
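	/*
	 * vcpu->arch.slb_max now holds the count of valid guest SLB entries
	 * saved above (0 for a radix guest, for which the copy-out and the
	 * slbia flush are both skipped).
	 */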
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001546
Paul Mackerrascda4a142018-03-22 09:48:54 +11001547 /* load host SLB entries */
1548BEGIN_MMU_FTR_SECTION
1549 b 0f
1550END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1551 ld r8,PACA_SLBSHADOWPTR(r13)
1552
1553 .rept SLB_NUM_BOLTED
1554 li r3, SLBSHADOW_SAVEAREA
1555 LDX_BE r5, r8, r3
1556 addi r3, r3, 8
1557 LDX_BE r6, r8, r3
1558 andis. r7,r5,SLB_ESID_V@h
1559 beq 1f
1560 slbmte r6,r5
15611: addi r8,r8,16
1562 .endr
15630:
1564
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001565guest_bypass:
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001566 stw r12, STACK_SLOT_TRAP(r1)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001567
1568 /* Save DEC */
1569 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1570 ld r3, HSTATE_KVM_VCORE(r13)
1571 mfspr r5,SPRN_DEC
1572 mftb r6
1573 /* On P9, if the guest has large decr enabled, don't sign extend */
1574BEGIN_FTR_SECTION
1575 ld r4, VCORE_LPCR(r3)
1576 andis. r4, r4, LPCR_LD@h
1577 bne 16f
1578END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1579 extsw r5,r5
158016: add r5,r5,r6
1581 /* r5 is a guest timebase value here, convert to host TB */
1582 ld r4,VCORE_TB_OFFSET_APPL(r3)
1583 subf r5,r4,r5
1584 std r5,VCPU_DEC_EXPIRES(r9)
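	/*
	 * vcpu->arch.dec_expires now holds the host timebase value at which
	 * the guest decrementer reaches zero:
	 *   dec_expires = guest DEC + guest TB - tb_offset_applied
	 * (DEC sign-extended unless the guest runs with the large
	 * decrementer enabled, as handled above).
	 */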
1585
Paul Mackerras6af27c82015-03-28 14:21:10 +11001586 /* Increment exit count, poke other threads to exit */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001587 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001588 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001589 nop
1590 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001591
Paul Mackerrasec257162015-06-24 21:18:03 +10001592 /* Stop others sending VCPU interrupts to this physical CPU */
1593 li r0, -1
1594 stw r0, VCPU_CPU(r9)
1595 stw r0, VCPU_THREAD_CPU(r9)
1596
Paul Mackerrasde56a942011-06-29 00:21:34 +00001597 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001598 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001599 stw r6,VCPU_CTRL(r9)
1600 andi. r0,r6,1
1601 bne 4f
1602 ori r6,r6,1
1603 mtspr SPRN_CTRLT,r6
16044:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001605 /*
1606 * Save the guest PURR/SPURR
1607 */
1608 mfspr r5,SPRN_PURR
1609 mfspr r6,SPRN_SPURR
1610 ld r7,VCPU_PURR(r9)
1611 ld r8,VCPU_SPURR(r9)
1612 std r5,VCPU_PURR(r9)
1613 std r6,VCPU_SPURR(r9)
1614 subf r5,r7,r5
1615 subf r6,r8,r6
1616
1617 /*
1618 * Restore host PURR/SPURR and add guest times
1619 * so that the time in the guest gets accounted.
1620 */
1621 ld r3,HSTATE_PURR(r13)
1622 ld r4,HSTATE_SPURR(r13)
1623 add r3,r3,r5
1624 add r4,r4,r6
1625 mtspr SPRN_PURR,r3
1626 mtspr SPRN_SPURR,r4
1627
Michael Neulingb005255e2014-01-08 21:25:21 +11001628BEGIN_FTR_SECTION
1629 b 8f
1630END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001631 /* Save POWER8-specific registers */
1632 mfspr r5, SPRN_IAMR
1633 mfspr r6, SPRN_PSPB
1634 mfspr r7, SPRN_FSCR
1635 std r5, VCPU_IAMR(r9)
1636 stw r6, VCPU_PSPB(r9)
1637 std r7, VCPU_FSCR(r9)
1638 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001639 mfspr r7, SPRN_TAR
1640 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001641 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001642 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001643 std r8, VCPU_EBBHR(r9)
1644 mfspr r5, SPRN_EBBRR
1645 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001646 mfspr r7, SPRN_PID
1647 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001648 std r5, VCPU_EBBRR(r9)
1649 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001650 stw r7, VCPU_GUEST_PID(r9)
1651 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001652BEGIN_FTR_SECTION
1653 mfspr r5, SPRN_TCSCR
1654 mfspr r6, SPRN_ACOP
1655 mfspr r7, SPRN_CSIGR
1656 mfspr r8, SPRN_TACR
1657 std r5, VCPU_TCSCR(r9)
1658 std r6, VCPU_ACOP(r9)
1659 std r7, VCPU_CSIGR(r9)
1660 std r8, VCPU_TACR(r9)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001661FTR_SECTION_ELSE
1662 mfspr r5, SPRN_TIDR
1663 mfspr r6, SPRN_PSSCR
1664 std r5, VCPU_TID(r9)
1665 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1666 rotldi r6, r6, 60
1667 std r6, VCPU_PSSCR(r9)
Paul Mackerras769377f2017-02-15 14:30:17 +11001668 /* Restore host HFSCR value */
1669 ld r7, STACK_SLOT_HFSCR(r1)
1670 mtspr SPRN_HFSCR, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001671ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001672 /*
1673 * Restore various registers to 0, where non-zero values
1674 * set by the guest could disrupt the host.
1675 */
1676 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001677 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001678 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001679BEGIN_FTR_SECTION
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001680 mtspr SPRN_IAMR, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001681 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001682 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1683 li r0, 1
1684 sldi r0, r0, 31
1685 mtspr SPRN_MMCRS, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001686END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Michael Neulingb005255e2014-01-08 21:25:21 +110016878:
1688
Paul Mackerrasde56a942011-06-29 00:21:34 +00001689 /* Save and reset AMR and UAMOR before turning on the MMU */
1690 mfspr r5,SPRN_AMR
1691 mfspr r6,SPRN_UAMOR
1692 std r5,VCPU_AMR(r9)
1693 std r6,VCPU_UAMOR(r9)
1694 li r6,0
1695 mtspr SPRN_AMR,r6
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001696 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001697
Paul Mackerrasde56a942011-06-29 00:21:34 +00001698 /* Switch DSCR back to host value */
1699 mfspr r8, SPRN_DSCR
1700 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001701 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001702 mtspr SPRN_DSCR, r7
1703
1704 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001705 std r14, VCPU_GPR(R14)(r9)
1706 std r15, VCPU_GPR(R15)(r9)
1707 std r16, VCPU_GPR(R16)(r9)
1708 std r17, VCPU_GPR(R17)(r9)
1709 std r18, VCPU_GPR(R18)(r9)
1710 std r19, VCPU_GPR(R19)(r9)
1711 std r20, VCPU_GPR(R20)(r9)
1712 std r21, VCPU_GPR(R21)(r9)
1713 std r22, VCPU_GPR(R22)(r9)
1714 std r23, VCPU_GPR(R23)(r9)
1715 std r24, VCPU_GPR(R24)(r9)
1716 std r25, VCPU_GPR(R25)(r9)
1717 std r26, VCPU_GPR(R26)(r9)
1718 std r27, VCPU_GPR(R27)(r9)
1719 std r28, VCPU_GPR(R28)(r9)
1720 std r29, VCPU_GPR(R29)(r9)
1721 std r30, VCPU_GPR(R30)(r9)
1722 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001723
1724 /* Save SPRGs */
1725 mfspr r3, SPRN_SPRG0
1726 mfspr r4, SPRN_SPRG1
1727 mfspr r5, SPRN_SPRG2
1728 mfspr r6, SPRN_SPRG3
1729 std r3, VCPU_SPRG0(r9)
1730 std r4, VCPU_SPRG1(r9)
1731 std r5, VCPU_SPRG2(r9)
1732 std r6, VCPU_SPRG3(r9)
1733
Paul Mackerras89436332012-03-02 01:38:23 +00001734 /* save FP state */
1735 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001736 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001737
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001738#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001739/*
1740 * Branch around the call if both CPU_FTR_TM and
1741 * CPU_FTR_P9_TM_HV_ASSIST are off.
1742 */
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001743BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001744 b 91f
1745END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001746 /*
1747 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1748 */
Simon Guo6f597c62018-05-23 15:01:48 +08001749 mr r3, r9
1750 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10001751 bl kvmppc_save_tm_hv
Simon Guo6f597c62018-05-23 15:01:48 +08001752 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100175391:
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001754#endif
1755
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001756 /* Increment yield count if they have a VPA */
1757 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1758 cmpdi r8, 0
1759 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001760 li r4, LPPACA_YIELDCOUNT
1761 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001762 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001763 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001764 li r3, 1
1765 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000176625:
1767 /* Save PMU registers if requested */
1768 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001769BEGIN_FTR_SECTION
1770 /*
1771 * POWER8 seems to have a hardware bug where setting
1772 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1773 * when some counters are already negative doesn't seem
1774 * to cause a performance monitor alert (and hence interrupt).
1775 * The effect of this is that when saving the PMU state,
1776 * if there is no PMU alert pending when we read MMCR0
1777 * before freezing the counters, but one becomes pending
1778 * before we read the counters, we lose it.
1779 * To work around this, we need a way to freeze the counters
1780 * before reading MMCR0. Normally, freezing the counters
1781 * is done by writing MMCR0 (to set MMCR0[FC]) which
1782	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1783 * we can also freeze the counters using MMCR2, by writing
1784 * 1s to all the counter freeze condition bits (there are
1785 * 9 bits each for 6 counters).
1786 */
1787 li r3, -1 /* set all freeze bits */
1788 clrrdi r3, r3, 10
1789 mfspr r10, SPRN_MMCR2
1790 mtspr SPRN_MMCR2, r3
1791 isync
1792END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001793 li r3, 1
1794 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1795 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1796 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001797 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001798 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001799 li r7, 0
1800 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001801 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001802 beq 21f /* if no VPA, save PMU stuff anyway */
1803 lbz r7, LPPACA_PMCINUSE(r8)
1804 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1805 bne 21f
1806 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1807 b 22f
180821: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001809 mfspr r7, SPRN_SIAR
1810 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001811 std r4, VCPU_MMCR(r9)
1812 std r5, VCPU_MMCR + 8(r9)
1813 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001814BEGIN_FTR_SECTION
1815 std r10, VCPU_MMCR + 24(r9)
1816END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001817 std r7, VCPU_SIAR(r9)
1818 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001819 mfspr r3, SPRN_PMC1
1820 mfspr r4, SPRN_PMC2
1821 mfspr r5, SPRN_PMC3
1822 mfspr r6, SPRN_PMC4
1823 mfspr r7, SPRN_PMC5
1824 mfspr r8, SPRN_PMC6
1825 stw r3, VCPU_PMC(r9)
1826 stw r4, VCPU_PMC + 4(r9)
1827 stw r5, VCPU_PMC + 8(r9)
1828 stw r6, VCPU_PMC + 12(r9)
1829 stw r7, VCPU_PMC + 16(r9)
1830 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001831BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001832 mfspr r5, SPRN_SIER
Paul Mackerras83677f52016-11-16 22:33:27 +11001833 std r5, VCPU_SIER(r9)
1834BEGIN_FTR_SECTION_NESTED(96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001835 mfspr r6, SPRN_SPMC1
1836 mfspr r7, SPRN_SPMC2
1837 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001838 stw r6, VCPU_PMC + 24(r9)
1839 stw r7, VCPU_PMC + 28(r9)
1840 std r8, VCPU_MMCR + 32(r9)
1841 lis r4, 0x8000
1842 mtspr SPRN_MMCRS, r4
Paul Mackerras83677f52016-11-16 22:33:27 +11001843END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001844END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000184522:
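	/*
	 * At this point the guest PMU is frozen and its state has been saved
	 * above, unless the guest's VPA indicated the PMU was not in use, in
	 * which case only a frozen MMCR0 image was stored.
	 */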
Paul Mackerrasde56a942011-06-29 00:21:34 +00001846
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001847 /* Restore host values of some registers */
1848BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001849 ld r5, STACK_SLOT_CIABR(r1)
1850 ld r6, STACK_SLOT_DAWR(r1)
1851 ld r7, STACK_SLOT_DAWRX(r1)
1852 mtspr SPRN_CIABR, r5
Michael Neulingb53221e2018-03-27 15:37:22 +11001853 /*
1854 * If the DAWR doesn't work, it's ok to write these here as
1855	 * these values should always be zero
1856 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001857 mtspr SPRN_DAWR, r6
1858 mtspr SPRN_DAWRX, r7
1859END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1860BEGIN_FTR_SECTION
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001861 ld r5, STACK_SLOT_TID(r1)
1862 ld r6, STACK_SLOT_PSSCR(r1)
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001863 ld r7, STACK_SLOT_PID(r1)
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001864 ld r8, STACK_SLOT_IAMR(r1)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001865 mtspr SPRN_TIDR, r5
1866 mtspr SPRN_PSSCR, r6
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001867 mtspr SPRN_PID, r7
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001868 mtspr SPRN_IAMR, r8
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001869END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001870
1871#ifdef CONFIG_PPC_RADIX_MMU
1872 /*
1873 * Are we running hash or radix ?
1874 */
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001875 ld r5, VCPU_KVM(r9)
1876 lbz r0, KVM_RADIX(r5)
1877 cmpwi cr2, r0, 0
Nicholas Piggin2bf10712018-07-05 18:47:00 +10001878 beq cr2, 2f
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001879
Paul Mackerrasdf158182018-05-17 14:47:59 +10001880 /*
1881 * Radix: do eieio; tlbsync; ptesync sequence in case we
1882 * interrupted the guest between a tlbie and a ptesync.
1883 */
1884 eieio
1885 tlbsync
1886 ptesync
1887
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001888 /* Radix: Handle the case where the guest used an illegal PID */
1889 LOAD_REG_ADDR(r4, mmu_base_pid)
1890 lwz r3, VCPU_GUEST_PID(r9)
1891 lwz r5, 0(r4)
1892 cmpw cr0,r3,r5
1893 blt 2f
1894
1895 /*
1896 * Illegal PID, the HW might have prefetched and cached in the TLB
1897 * some translations for the LPID 0 / guest PID combination which
1898 * Linux doesn't know about, so we need to flush that PID out of
1899 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1900 * the right context.
1901 */
1902 li r0,0
1903 mtspr SPRN_LPID,r0
1904 isync
1905
1906 /* Then do a congruence class local flush */
1907 ld r6,VCPU_KVM(r9)
1908 lwz r0,KVM_TLB_SETS(r6)
1909 mtctr r0
1910 li r7,0x400 /* IS field = 0b01 */
1911 ptesync
1912 sldi r0,r3,32 /* RS has PID */
19131: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1914 addi r7,r7,0x1000
1915 bdnz 1b
1916 ptesync
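	/*
	 * The loop above issued one local tlbiel per TLB congruence class
	 * (KVM_TLB_SETS iterations, stepping the set index in r7 by 0x1000
	 * each time) with RS carrying the guest PID, so all cached
	 * translations for that PID under LPID 0 have now been flushed
	 * locally.
	 */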
1917
Nicholas Piggin2bf10712018-07-05 18:47:00 +100019182:
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001919#endif /* CONFIG_PPC_RADIX_MMU */
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001920
Paul Mackerrasde56a942011-06-29 00:21:34 +00001921 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001922 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001923 * We don't have to lock against tlbies but we do
1924 * have to coordinate the hardware threads.
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001925 * Here STACK_SLOT_TRAP(r1) contains the trap number.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001926 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001927kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001928 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001929 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001930 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1931 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001932 cmpwi r3,0
1933 beq 15f
1934 HMT_LOW
193513: lbz r3,VCORE_IN_GUEST(r5)
1936 cmpwi r3,0
1937 bne 13b
1938 HMT_MEDIUM
1939 b 16f
1940
1941 /* Primary thread waits for all the secondaries to exit guest */
194215: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001943 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001944 clrldi r3,r3,56
1945 cmpw r3,r0
1946 bne 15b
1947 isync
1948
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001949 /* Did we actually switch to the guest at all? */
1950 lbz r6, VCORE_IN_GUEST(r5)
1951 cmpwi r6, 0
1952 beq 19f
1953
Paul Mackerrasde56a942011-06-29 00:21:34 +00001954 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001955 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11001956BEGIN_FTR_SECTION
1957 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001958 li r8,LPID_RSVD /* switch to reserved LPID */
1959 mtspr SPRN_LPID,r8
1960 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11001961 mtspr SPRN_SDR1,r6 /* switch to host page table */
1962END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001963 mtspr SPRN_LPID,r7
1964 isync
1965
Michael Neulingb005255e2014-01-08 21:25:21 +11001966BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001967 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11001968 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001969 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11001970 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001971 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11001972 /* clear DPDES so we don't get guest doorbells in the host */
1973 li r8, 0
1974 mtspr SPRN_DPDES, r8
1975END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1976
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301977 /* If HMI, call kvmppc_realmode_hmi_handler() */
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001978 lwz r12, STACK_SLOT_TRAP(r1)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301979 cmpwi r12, BOOK3S_INTERRUPT_HMI
1980 bne 27f
1981 bl kvmppc_realmode_hmi_handler
1982 nop
Paul Mackerrasd0757452018-01-17 20:51:13 +11001983 cmpdi r3, 0
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301984 /*
Paul Mackerrasd0757452018-01-17 20:51:13 +11001985 * At this point kvmppc_realmode_hmi_handler may have resync-ed
1986 * the TB, and if it has, we must not subtract the guest timebase
1987 * offset from the timebase. So, skip it.
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301988 *
1989 * Also, do not call kvmppc_subcore_exit_guest() because it has
1990 * been invoked as part of kvmppc_realmode_hmi_handler().
1991 */
Paul Mackerrasd0757452018-01-17 20:51:13 +11001992 beq 30f
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301993
199427:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001995 /* Subtract timebase offset from timebase */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001996 ld r8, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001997 cmpdi r8,0
1998 beq 17f
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001999 li r0, 0
2000 std r0, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11002001 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002002 subf r8,r8,r6
2003 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
2004 mftb r7 /* check if lower 24 bits overflowed */
2005 clrldi r6,r6,40
2006 clrldi r7,r7,40
2007 cmpld r7,r6
2008 bge 17f
2009 addis r8,r8,0x100 /* if so, increment upper 40 bits */
2010 mtspr SPRN_TBU40,r8
2011
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +0530201217: bl kvmppc_subcore_exit_guest
2013 nop
201430: ld r5,HSTATE_KVM_VCORE(r13)
2015 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
2016
Paul Mackerrasde56a942011-06-29 00:21:34 +00002017 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302018 ld r0, VCORE_PCR(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002019 cmpdi r0, 0
2020 beq 18f
2021 li r0, 0
2022 mtspr SPRN_PCR, r0
202318:
2024 /* Signal secondary CPUs to continue */
2025 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000202619: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002027 mtspr SPRN_HDEC,r8
2028
Paul Mackerrasc0101502017-10-19 14:11:23 +1100202916:
2030BEGIN_FTR_SECTION
2031 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
2032 ld r3, HSTATE_SPLIT_MODE(r13)
2033 cmpdi r3, 0
2034 beq 47f
2035 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
2036 cmpwi r8, 0
2037 beq 47f
Paul Mackerrasc0101502017-10-19 14:11:23 +11002038 bl kvmhv_p9_restore_lpcr
2039 nop
Paul Mackerrasc0101502017-10-19 14:11:23 +11002040 b 48f
204147:
2042END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2043 ld r8,KVM_HOST_LPCR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002044 mtspr SPRN_LPCR,r8
2045 isync
Paul Mackerrasc0101502017-10-19 14:11:23 +1100204648:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002047#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2048 /* Finish timing, if we have a vcpu */
2049 ld r4, HSTATE_KVM_VCPU(r13)
2050 cmpdi r4, 0
2051 li r3, 0
2052 beq 2f
2053 bl kvmhv_accumulate_time
20542:
2055#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00002056 /* Unset guest mode */
2057 li r0, KVM_GUEST_MODE_NONE
2058 stb r0, HSTATE_IN_GUEST(r13)
2059
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11002060 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10002061 ld r0, SFS+PPC_LR_STKOFF(r1)
2062 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10002063 mtlr r0
2064 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002065
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002066#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2067/*
2068 * Softpatch interrupt for transactional memory emulation cases
2069 * on POWER9 DD2.2. This is early in the guest exit path - we
2070 * haven't saved registers or done a treclaim yet.
2071 */
2072kvmppc_tm_emul:
2073 /* Save instruction image in HEIR */
2074 mfspr r3, SPRN_HEIR
2075 stw r3, VCPU_HEIR(r9)
2076
2077 /*
2078 * The cases we want to handle here are those where the guest
2079 * is in real suspend mode and is trying to transition to
2080 * transactional mode.
2081 */
2082 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2083 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2084 bne guest_exit_cont
2085 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2086 cmpwi r3, 1 /* or if not in suspend state */
2087 bne guest_exit_cont
2088
2089 /* Call C code to do the emulation */
2090 mr r3, r9
2091 bl kvmhv_p9_tm_emulation_early
2092 nop
2093 ld r9, HSTATE_KVM_VCPU(r13)
2094 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2095 cmpwi r3, 0
2096 beq guest_exit_cont /* continue exiting if not handled */
2097 ld r10, VCPU_PC(r9)
2098 ld r11, VCPU_MSR(r9)
2099 b fast_interrupt_c_return /* go back to guest if handled */
2100#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2101
Paul Mackerras697d3892011-12-12 12:36:37 +00002102/*
2103 * Check whether an HDSI is an HPTE not found fault or something else.
2104 * If it is an HPTE not found fault that is due to the guest accessing
2105 * a page that they have mapped but which we have paged out, then
2106 * we continue on with the guest exit path. In all other cases,
2107 * reflect the HDSI to the guest as a DSI.
2108 */
2109kvmppc_hdsi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002110 ld r3, VCPU_KVM(r9)
2111 lbz r0, KVM_RADIX(r3)
Paul Mackerras697d3892011-12-12 12:36:37 +00002112 mfspr r4, SPRN_HDAR
2113 mfspr r6, SPRN_HDSISR
Michael Neulinge001fa72017-09-15 15:26:14 +10002114BEGIN_FTR_SECTION
2115 /* Look for DSISR canary. If we find it, retry instruction */
2116 cmpdi r6, 0x7fff
2117 beq 6f
2118END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2119 cmpwi r0, 0
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002120 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
Paul Mackerras4cf302b2011-12-12 12:38:51 +00002121 /* HPTE not found fault or protection fault? */
2122 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00002123 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002124 andi. r0, r11, MSR_DR /* data relocation enabled? */
2125 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002126BEGIN_FTR_SECTION
2127 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2128 b 4f
2129END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras697d3892011-12-12 12:36:37 +00002130 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002131 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002132 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2133 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000021344: std r4, VCPU_FAULT_DAR(r9)
2135 stw r6, VCPU_FAULT_DSISR(r9)
2136
2137 /* Search the hash table. */
2138 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002139 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002140 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00002141 ld r9, HSTATE_KVM_VCPU(r13)
2142 ld r10, VCPU_PC(r9)
2143 ld r11, VCPU_MSR(r9)
2144 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2145 cmpdi r3, 0 /* retry the instruction */
2146 beq 6f
2147 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002148 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00002149 cmpdi r3, -2 /* MMIO emulation; need instr word */
2150 beq 2f
2151
Paul Mackerrascf29b212015-10-27 16:10:20 +11002152 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00002153 ld r4, VCPU_FAULT_DAR(r9)
2154 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110021551: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00002156 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110021577: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00002158 mtspr SPRN_SRR0, r10
2159 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002160 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002161 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002162fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000021636: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10002164 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00002165 mtctr r7
2166 mtxer r8
2167 mr r4, r9
2168 b fast_guest_return
2169
21703: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2171 ld r5, KVM_VRMA_SLB_V(r5)
2172 b 4b
2173
2174 /* If this is for emulated MMIO, load the instruction word */
21752: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2176
2177 /* Set guest mode to 'jump over instruction' so if lwz faults
2178 * we'll just continue at the next IP. */
2179 li r0, KVM_GUEST_MODE_SKIP
2180 stb r0, HSTATE_IN_GUEST(r13)
2181
2182 /* Do the access with MSR:DR enabled */
2183 mfmsr r3
2184 ori r4, r3, MSR_DR /* Enable paging for data */
2185 mtmsrd r4
2186 lwz r8, 0(r10)
2187 mtmsrd r3
2188
2189 /* Store the result */
2190 stw r8, VCPU_LAST_INST(r9)
2191
2192 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002193 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00002194 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002195 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00002196
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002197.Lradix_hdsi:
2198 std r4, VCPU_FAULT_DAR(r9)
2199 stw r6, VCPU_FAULT_DSISR(r9)
2200.Lradix_hisi:
2201 mfspr r5, SPRN_ASDR
2202 std r5, VCPU_FAULT_GPA(r9)
2203 b guest_exit_cont
2204
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002205/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002206 * Similarly for an HISI, reflect it to the guest as an ISI unless
2207 * it is an HPTE not found fault for a page that we have paged out.
2208 */
2209kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002210 ld r3, VCPU_KVM(r9)
2211 lbz r0, KVM_RADIX(r3)
2212 cmpwi r0, 0
2213 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002214 andis. r0, r11, SRR1_ISI_NOPT@h
2215 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002216 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2217 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002218BEGIN_FTR_SECTION
2219 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2220 b 4f
2221END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002222 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002223 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002224 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2225 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000022264:
2227 /* Search the hash table. */
2228 mr r3, r9 /* vcpu pointer */
2229 mr r4, r10
2230 mr r6, r11
2231 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002232 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002233 ld r9, HSTATE_KVM_VCPU(r13)
2234 ld r10, VCPU_PC(r9)
2235 ld r11, VCPU_MSR(r9)
2236 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2237 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002238 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002239 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002240 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002241
Paul Mackerrascf29b212015-10-27 16:10:20 +11002242 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002243 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110022441: li r0, BOOK3S_INTERRUPT_INST_STORAGE
22457: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002246 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002247 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002248 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002249 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002250
22513: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2252 ld r5, KVM_VRMA_SLB_V(r6)
2253 b 4b
2254
2255/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002256 * Try to handle an hcall in real mode.
2257 * Returns to the guest if we handle it, or continues on up to
2258 * the kernel if we can't (i.e. if we don't have a handler for
2259 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002260 *
2261 * r5 - r8 contain hcall args,
2262 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002263 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002264hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00002265 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002266 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08002267 /* sc 1 from userspace - reflect to guest syscall */
2268 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002269 clrrdi r3,r3,2
2270 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002271 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10002272 /* See if this hcall is enabled for in-kernel handling */
2273 ld r4, VCPU_KVM(r9)
2274 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2275 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2276 add r4, r4, r0
2277 ld r0, KVM_ENABLED_HCALLS(r4)
2278 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2279 srd r0, r0, r4
2280 andi. r0, r0, 1
2281 beq guest_exit_cont
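	/*
	 * Worked example of the bitmap test above: for H_ENTER (0x08),
	 * r0 = 0x08 >> 8 = 0 selects the first doubleword of
	 * kvm->arch.enabled_hcalls[], and (0x08 >> 2) & 0x3f = 2 is the bit
	 * tested within it; since hcall numbers are spaced 4 apart, each bit
	 * corresponds to one hcall number divided by 4.
	 */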
2282 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002283 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10002284 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002285 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002286 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10002287 add r12,r3,r4
2288 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002289 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002290 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002291 bctrl
2292 cmpdi r3,H_TOO_HARD
2293 beq hcall_real_fallback
2294 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00002295 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002296 ld r10,VCPU_PC(r4)
2297 ld r11,VCPU_MSR(r4)
2298 b fast_guest_return
2299
Liu Ping Fan27025a62013-11-19 14:12:48 +08002300sc_1_fast_return:
2301 mtspr SPRN_SRR0,r10
2302 mtspr SPRN_SRR1,r11
2303 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11002304 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08002305 mr r4,r9
2306 b fast_guest_return
2307
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002308	 /* We've attempted a real mode hcall, but it has been punted back
2309	 * to userspace. We need to restore some clobbered volatile registers
2310	 * before resuming the pass-it-to-qemu path */
2311hcall_real_fallback:
2312 li r12,BOOK3S_INTERRUPT_SYSCALL
2313 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002314
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002315 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002316
2317 .globl hcall_real_table
2318hcall_real_table:
2319 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002320 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2321 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2322 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10002323 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2324 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002325 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2326 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002327 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002328 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002329 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002330 .long 0 /* 0x2c */
2331 .long 0 /* 0x30 */
2332 .long 0 /* 0x34 */
2333 .long 0 /* 0x38 */
2334 .long 0 /* 0x3c */
2335 .long 0 /* 0x40 */
2336 .long 0 /* 0x44 */
2337 .long 0 /* 0x48 */
2338 .long 0 /* 0x4c */
2339 .long 0 /* 0x50 */
2340 .long 0 /* 0x54 */
2341 .long 0 /* 0x58 */
2342 .long 0 /* 0x5c */
2343 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002344#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002345 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2346 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2347 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002348 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002349 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002350#else
2351 .long 0 /* 0x64 - H_EOI */
2352 .long 0 /* 0x68 - H_CPPR */
2353 .long 0 /* 0x6c - H_IPI */
2354 .long 0 /* 0x70 - H_IPOLL */
2355 .long 0 /* 0x74 - H_XIRR */
2356#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002357 .long 0 /* 0x78 */
2358 .long 0 /* 0x7c */
2359 .long 0 /* 0x80 */
2360 .long 0 /* 0x84 */
2361 .long 0 /* 0x88 */
2362 .long 0 /* 0x8c */
2363 .long 0 /* 0x90 */
2364 .long 0 /* 0x94 */
2365 .long 0 /* 0x98 */
2366 .long 0 /* 0x9c */
2367 .long 0 /* 0xa0 */
2368 .long 0 /* 0xa4 */
2369 .long 0 /* 0xa8 */
2370 .long 0 /* 0xac */
2371 .long 0 /* 0xb0 */
2372 .long 0 /* 0xb4 */
2373 .long 0 /* 0xb8 */
2374 .long 0 /* 0xbc */
2375 .long 0 /* 0xc0 */
2376 .long 0 /* 0xc4 */
2377 .long 0 /* 0xc8 */
2378 .long 0 /* 0xcc */
2379 .long 0 /* 0xd0 */
2380 .long 0 /* 0xd4 */
2381 .long 0 /* 0xd8 */
2382 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002383 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11002384 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002385 .long 0 /* 0xe8 */
2386 .long 0 /* 0xec */
2387 .long 0 /* 0xf0 */
2388 .long 0 /* 0xf4 */
2389 .long 0 /* 0xf8 */
2390 .long 0 /* 0xfc */
2391 .long 0 /* 0x100 */
2392 .long 0 /* 0x104 */
2393 .long 0 /* 0x108 */
2394 .long 0 /* 0x10c */
2395 .long 0 /* 0x110 */
2396 .long 0 /* 0x114 */
2397 .long 0 /* 0x118 */
2398 .long 0 /* 0x11c */
2399 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002400 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11002401 .long 0 /* 0x128 */
2402 .long 0 /* 0x12c */
2403 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002404 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002405 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11002406 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11002407 .long 0 /* 0x140 */
2408 .long 0 /* 0x144 */
2409 .long 0 /* 0x148 */
2410 .long 0 /* 0x14c */
2411 .long 0 /* 0x150 */
2412 .long 0 /* 0x154 */
2413 .long 0 /* 0x158 */
2414 .long 0 /* 0x15c */
2415 .long 0 /* 0x160 */
2416 .long 0 /* 0x164 */
2417 .long 0 /* 0x168 */
2418 .long 0 /* 0x16c */
2419 .long 0 /* 0x170 */
2420 .long 0 /* 0x174 */
2421 .long 0 /* 0x178 */
2422 .long 0 /* 0x17c */
2423 .long 0 /* 0x180 */
2424 .long 0 /* 0x184 */
2425 .long 0 /* 0x188 */
2426 .long 0 /* 0x18c */
2427 .long 0 /* 0x190 */
2428 .long 0 /* 0x194 */
2429 .long 0 /* 0x198 */
2430 .long 0 /* 0x19c */
2431 .long 0 /* 0x1a0 */
2432 .long 0 /* 0x1a4 */
2433 .long 0 /* 0x1a8 */
2434 .long 0 /* 0x1ac */
2435 .long 0 /* 0x1b0 */
2436 .long 0 /* 0x1b4 */
2437 .long 0 /* 0x1b8 */
2438 .long 0 /* 0x1bc */
2439 .long 0 /* 0x1c0 */
2440 .long 0 /* 0x1c4 */
2441 .long 0 /* 0x1c8 */
2442 .long 0 /* 0x1cc */
2443 .long 0 /* 0x1d0 */
2444 .long 0 /* 0x1d4 */
2445 .long 0 /* 0x1d8 */
2446 .long 0 /* 0x1dc */
2447 .long 0 /* 0x1e0 */
2448 .long 0 /* 0x1e4 */
2449 .long 0 /* 0x1e8 */
2450 .long 0 /* 0x1ec */
2451 .long 0 /* 0x1f0 */
2452 .long 0 /* 0x1f4 */
2453 .long 0 /* 0x1f8 */
2454 .long 0 /* 0x1fc */
2455 .long 0 /* 0x200 */
2456 .long 0 /* 0x204 */
2457 .long 0 /* 0x208 */
2458 .long 0 /* 0x20c */
2459 .long 0 /* 0x210 */
2460 .long 0 /* 0x214 */
2461 .long 0 /* 0x218 */
2462 .long 0 /* 0x21c */
2463 .long 0 /* 0x220 */
2464 .long 0 /* 0x224 */
2465 .long 0 /* 0x228 */
2466 .long 0 /* 0x22c */
2467 .long 0 /* 0x230 */
2468 .long 0 /* 0x234 */
2469 .long 0 /* 0x238 */
2470 .long 0 /* 0x23c */
2471 .long 0 /* 0x240 */
2472 .long 0 /* 0x244 */
2473 .long 0 /* 0x248 */
2474 .long 0 /* 0x24c */
2475 .long 0 /* 0x250 */
2476 .long 0 /* 0x254 */
2477 .long 0 /* 0x258 */
2478 .long 0 /* 0x25c */
2479 .long 0 /* 0x260 */
2480 .long 0 /* 0x264 */
2481 .long 0 /* 0x268 */
2482 .long 0 /* 0x26c */
2483 .long 0 /* 0x270 */
2484 .long 0 /* 0x274 */
2485 .long 0 /* 0x278 */
2486 .long 0 /* 0x27c */
2487 .long 0 /* 0x280 */
2488 .long 0 /* 0x284 */
2489 .long 0 /* 0x288 */
2490 .long 0 /* 0x28c */
2491 .long 0 /* 0x290 */
2492 .long 0 /* 0x294 */
2493 .long 0 /* 0x298 */
2494 .long 0 /* 0x29c */
2495 .long 0 /* 0x2a0 */
2496 .long 0 /* 0x2a4 */
2497 .long 0 /* 0x2a8 */
2498 .long 0 /* 0x2ac */
2499 .long 0 /* 0x2b0 */
2500 .long 0 /* 0x2b4 */
2501 .long 0 /* 0x2b8 */
2502 .long 0 /* 0x2bc */
2503 .long 0 /* 0x2c0 */
2504 .long 0 /* 0x2c4 */
2505 .long 0 /* 0x2c8 */
2506 .long 0 /* 0x2cc */
2507 .long 0 /* 0x2d0 */
2508 .long 0 /* 0x2d4 */
2509 .long 0 /* 0x2d8 */
2510 .long 0 /* 0x2dc */
2511 .long 0 /* 0x2e0 */
2512 .long 0 /* 0x2e4 */
2513 .long 0 /* 0x2e8 */
2514 .long 0 /* 0x2ec */
2515 .long 0 /* 0x2f0 */
2516 .long 0 /* 0x2f4 */
2517 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002518#ifdef CONFIG_KVM_XICS
2519 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2520#else
2521 .long 0 /* 0x2fc - H_XIRR_X*/
2522#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11002523 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002524 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002525hcall_real_table_end:
2526
Paul Mackerras8563bf52014-01-08 21:25:29 +11002527_GLOBAL(kvmppc_h_set_xdabr)
2528 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2529 beq 6f
2530 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2531 andc. r0, r5, r0
2532 beq 3f
25336: li r3, H_PARAMETER
2534 blr
2535
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002536_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002537 li r5, DABRX_USER | DABRX_KERNEL
25383:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002539BEGIN_FTR_SECTION
2540 b 2f
2541END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002542 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002543 stw r5, VCPU_DABRX(r3)
2544 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002545 /* Work around P7 bug where DABR can get corrupted on mtspr */
25461: mtspr SPRN_DABR,r4
2547 mfspr r5, SPRN_DABR
2548 cmpd r4, r5
2549 bne 1b
2550 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002551 li r3,0
2552 blr
2553
Michael Neulinge8ebedb2018-03-27 15:37:21 +110025542:
2555BEGIN_FTR_SECTION
2556 /* POWER9 with disabled DAWR */
Aneesh Kumar K.Vca9a16c2018-03-30 17:27:24 +05302557 li r3, H_HARDWARE
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002558 blr
2559END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002560 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002561 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002562 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002563 clrrdi r4, r4, 3
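	/*
	 * r5 now carries the DAWRX image: the DABR's low-order control bits
	 * have been repacked into the DAWRX_DR/DAWRX_DW and DAWRX_WT fields
	 * by the two rlwimi instructions above, and r4 is the watchpoint
	 * address rounded down to an 8-byte boundary (clrrdi clears the low
	 * 3 bits).
	 */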
2564 std r4, VCPU_DAWR(r3)
2565 std r5, VCPU_DAWRX(r3)
2566 mtspr SPRN_DAWR, r4
2567 mtspr SPRN_DAWRX, r5
2568 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002569 blr
2570
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002571_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002572 ori r11,r11,MSR_EE
2573 std r11,VCPU_MSR(r3)
2574 li r0,1
2575 stb r0,VCPU_CEDED(r3)
2576 sync /* order setting ceded vs. testing prodded */
2577 lbz r5,VCPU_PRODDED(r3)
2578 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002579 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002580 li r12,0 /* set trap to 0 to say hcall is handled */
2581 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002582 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002583 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002584
2585 /*
2586 * Set our bit in the bitmask of napping threads unless all the
2587 * other threads are already napping, in which case we send this
2588 * up to the host.
2589 */
2590 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002591 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002592 lwz r8,VCORE_ENTRY_EXIT(r5)
2593 clrldi r8,r8,56
2594 li r0,1
2595 sld r0,r0,r6
2596 addi r6,r5,VCORE_NAPPING_THREADS
259731: lwarx r4,0,r6
2598 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002599 cmpw r4,r8
2600 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002601 stwcx. r4,0,r6
2602 bne 31b
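	/*
	 * The lwarx/stwcx. pair above atomically ORs this thread's bit into
	 * vcore->napping_threads, retrying if another thread updated the
	 * word in the meantime; if the result already covers every thread
	 * that entered the guest (r4 == r8), the cede is handed up to the
	 * host via kvm_cede_exit before the store is attempted.
	 */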
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002603 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002604 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002605 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002606 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002607 lwz r7,VCORE_ENTRY_EXIT(r5)
2608 cmpwi r7,0x100
2609 bge 33f /* another thread already exiting */
2610
2611/*
2612 * Although not specifically required by the architecture, POWER7
2613 * preserves the following registers in nap mode, even if an SMT mode
2614 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2615 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2616 */
2617 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002618 std r14, VCPU_GPR(R14)(r3)
2619 std r15, VCPU_GPR(R15)(r3)
2620 std r16, VCPU_GPR(R16)(r3)
2621 std r17, VCPU_GPR(R17)(r3)
2622 std r18, VCPU_GPR(R18)(r3)
2623 std r19, VCPU_GPR(R19)(r3)
2624 std r20, VCPU_GPR(R20)(r3)
2625 std r21, VCPU_GPR(R21)(r3)
2626 std r22, VCPU_GPR(R22)(r3)
2627 std r23, VCPU_GPR(R23)(r3)
2628 std r24, VCPU_GPR(R24)(r3)
2629 std r25, VCPU_GPR(R25)(r3)
2630 std r26, VCPU_GPR(R26)(r3)
2631 std r27, VCPU_GPR(R27)(r3)
2632 std r28, VCPU_GPR(R28)(r3)
2633 std r29, VCPU_GPR(R29)(r3)
2634 std r30, VCPU_GPR(R30)(r3)
2635 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002636
2637 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002638 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002639
Paul Mackerras93d17392016-06-22 15:52:55 +10002640#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002641/*
2642 * Branch around the call if both CPU_FTR_TM and
2643 * CPU_FTR_P9_TM_HV_ASSIST are off.
2644 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002645BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002646 b 91f
2647END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002648 /*
2649 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2650 */
Simon Guo6f597c62018-05-23 15:01:48 +08002651 ld r3, HSTATE_KVM_VCPU(r13)
2652 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002653 bl kvmppc_save_tm_hv
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100265491:
Paul Mackerras93d17392016-06-22 15:52:55 +10002655#endif
2656
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002657 /*
2658 * Set DEC to the smaller of DEC and HDEC, so that we wake
2659 * no later than the end of our timeslice (HDEC interrupts
2660 * don't wake us from nap).
2661 */
2662 mfspr r3, SPRN_DEC
2663 mfspr r4, SPRN_HDEC
2664 mftb r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10002665BEGIN_FTR_SECTION
2666 /* On P9 check whether the guest has large decrementer mode enabled */
2667 ld r6, HSTATE_KVM_VCORE(r13)
2668 ld r6, VCORE_LPCR(r6)
2669 andis. r6, r6, LPCR_LD@h
2670 bne 68f
2671END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras2f272462017-05-22 16:25:14 +10002672 extsw r3, r3
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000267368: EXTEND_HDEC(r4)
Paul Mackerras2f272462017-05-22 16:25:14 +10002674 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002675 ble 67f
2676 mtspr SPRN_DEC, r4
267767:
2678 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002679 add r3, r3, r5
2680 ld r4, HSTATE_KVM_VCPU(r13)
2681 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002682 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002683 subf r3, r6, r3 /* convert to host TB value */
2684 std r3, VCPU_DEC_EXPIRES(r4)
2685
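/*
 * Editor's sketch (not part of the original source): the sequence above
 * clamps the decrementer to the hypervisor decrementer so the thread
 * wakes no later than the end of its timeslice, and records the guest
 * decrementer's expiry converted to the host timebase (the vcore
 * timebase offset is still applied here, so the timebase read returns
 * guest TB). The POWER9 large-decrementer case is ignored, and the SPR
 * accessor names below are assumptions for illustration.
 */
#include <stdint.h>

extern int64_t read_dec(void), read_hdec(void);	/* sign-extended DEC/HDEC */
extern uint64_t read_tb(void);			/* timebase (guest TB here) */
extern void write_dec(int64_t val);

static uint64_t cede_save_dec_expiry(int64_t tb_offset_applied)
{
	int64_t dec = read_dec(), hdec = read_hdec();
	uint64_t now = read_tb();

	if (dec > hdec)
		write_dec(hdec);	/* wake no later than HDEC expiry */
	/* stored in VCPU_DEC_EXPIRES, in host timebase units */
	return (now + dec) - tb_offset_applied;
}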
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002686#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2687 ld r4, HSTATE_KVM_VCPU(r13)
2688 addi r3, r4, VCPU_TB_CEDE
2689 bl kvmhv_accumulate_time
2690#endif
2691
Paul Mackerrasccc07772015-03-28 14:21:07 +11002692 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2693
Paul Mackerras19ccb762011-07-23 17:42:46 +10002694 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002695 * Take a nap until a decrementer or external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002696 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002697 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002698 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002699 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002700kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002701 mfspr r0, SPRN_CTRLF
2702 clrrdi r0, r0, 1
2703 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302704
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002705 li r0,1
2706 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002707 mfspr r5,SPRN_LPCR
2708 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002709BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002710 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002711 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002712END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
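/*
 * Editor's sketch (not part of the original source): the LPCR
 * power-saving exit cause enable bits chosen above select the wakeup
 * sources for the nap/stop below: external and decrementer interrupts
 * always (PECE0/PECE1), hypervisor doorbells on POWER8 and later
 * (PECEDH), and privileged doorbells only when ceding (PECEDP, carried
 * in r3). The bit values below are placeholders, not the real LPCR
 * encodings.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_LPCR_PECE0	(1ull << 0)	/* placeholder values */
#define SK_LPCR_PECE1	(1ull << 1)
#define SK_LPCR_PECEDH	(1ull << 2)
#define SK_LPCR_PECEDP	(1ull << 3)

static uint64_t nap_wakeup_lpcr(uint64_t lpcr, bool arch_207s, bool ceding)
{
	lpcr |= SK_LPCR_PECE0 | SK_LPCR_PECE1;	/* external + decrementer */
	if (arch_207s) {
		lpcr |= SK_LPCR_PECEDH;		/* hypervisor doorbell */
		if (ceding)
			lpcr |= SK_LPCR_PECEDP;	/* privileged doorbell */
	}
	return lpcr;
}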
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002713
2714kvm_nap_sequence: /* desired LPCR value in r5 */
2715BEGIN_FTR_SECTION
2716 /*
2717 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2718 * enable state loss = 1 (allow SMT mode switch)
2719 * requested level = 0 (just stop dispatching)
2720 */
2721 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2722 mtspr SPRN_PSSCR, r3
2723 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2724 li r4, LPCR_PECE_HVEE@higher
2725 sldi r4, r4, 32
2726 or r5, r5, r4
2727END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002728 mtspr SPRN_LPCR,r5
2729 isync
2730 li r0, 0
2731 std r0, HSTATE_SCRATCH0(r13)
2732 ptesync
2733 ld r0, HSTATE_SCRATCH0(r13)
27341: cmpd r0, r0
2735 bne 1b
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002736BEGIN_FTR_SECTION
Paul Mackerras19ccb762011-07-23 17:42:46 +10002737 nap
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002738FTR_SECTION_ELSE
2739 PPC_STOP
2740ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002741 b .
2742
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100274333: mr r4, r3
2744 li r3, 0
2745 li r12, 0
2746 b 34f
2747
Paul Mackerras19ccb762011-07-23 17:42:46 +10002748kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002749 /* get vcpu pointer */
2750 ld r4, HSTATE_KVM_VCPU(r13)
2751
Paul Mackerras19ccb762011-07-23 17:42:46 +10002752 /* Woken by external or decrementer interrupt */
2753 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002754
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002755#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2756 addi r3, r4, VCPU_TB_RMINTR
2757 bl kvmhv_accumulate_time
2758#endif
2759
Paul Mackerras93d17392016-06-22 15:52:55 +10002760#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002761/*
2762 * Branch around the call if both CPU_FTR_TM and
2763 * CPU_FTR_P9_TM_HV_ASSIST are off.
2764 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002765BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002766 b 91f
2767END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002768 /*
2769 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2770 */
Simon Guo6f597c62018-05-23 15:01:48 +08002771 mr r3, r4
2772 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002773 bl kvmppc_restore_tm_hv
Simon Guo6f597c62018-05-23 15:01:48 +08002774 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100277591:
Paul Mackerras93d17392016-06-22 15:52:55 +10002776#endif
2777
Paul Mackerras19ccb762011-07-23 17:42:46 +10002778 /* load up FP state */
2779 bl kvmppc_load_fp
2780
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002781 /* Restore guest decrementer */
2782 ld r3, VCPU_DEC_EXPIRES(r4)
2783 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002784 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002785 add r3, r3, r6 /* convert host TB to guest TB value */
2786 mftb r7
2787 subf r3, r7, r3
2788 mtspr SPRN_DEC, r3
2789
Paul Mackerras19ccb762011-07-23 17:42:46 +10002790 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002791 ld r14, VCPU_GPR(R14)(r4)
2792 ld r15, VCPU_GPR(R15)(r4)
2793 ld r16, VCPU_GPR(R16)(r4)
2794 ld r17, VCPU_GPR(R17)(r4)
2795 ld r18, VCPU_GPR(R18)(r4)
2796 ld r19, VCPU_GPR(R19)(r4)
2797 ld r20, VCPU_GPR(R20)(r4)
2798 ld r21, VCPU_GPR(R21)(r4)
2799 ld r22, VCPU_GPR(R22)(r4)
2800 ld r23, VCPU_GPR(R23)(r4)
2801 ld r24, VCPU_GPR(R24)(r4)
2802 ld r25, VCPU_GPR(R25)(r4)
2803 ld r26, VCPU_GPR(R26)(r4)
2804 ld r27, VCPU_GPR(R27)(r4)
2805 ld r28, VCPU_GPR(R28)(r4)
2806 ld r29, VCPU_GPR(R29)(r4)
2807 ld r30, VCPU_GPR(R30)(r4)
2808 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002809
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002810 /* Check the wake reason in SRR1 to see why we got here */
2811 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002812
Suresh Warrier37f55d32016-08-19 15:35:46 +10002813 /*
2814 * Restore volatile registers since we could have called a
2815 * C routine in kvmppc_check_wake_reason
2816 * r4 = VCPU
 2817	 * r3 tells us whether we need to return to the host or not
 2818	 * WARNING: r3 is checked further down;
 2819	 * do not modify it until that check is done.
2820 */
2821 ld r4, HSTATE_KVM_VCPU(r13)
2822
Paul Mackerras19ccb762011-07-23 17:42:46 +10002823 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100282434: ld r5,HSTATE_KVM_VCORE(r13)
2825 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002826 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002827 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002828 addi r6,r5,VCORE_NAPPING_THREADS
282932: lwarx r7,0,r6
2830 andc r7,r7,r0
2831 stwcx. r7,0,r6
2832 bne 32b
2833 li r0,0
2834 stb r0,HSTATE_NAPPING(r13)
2835
Suresh Warrier37f55d32016-08-19 15:35:46 +10002836 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002837 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002838 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002839 cmpdi r3, 0
2840 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002841
Paul Mackerras19ccb762011-07-23 17:42:46 +10002842 /* see if any other thread is already exiting */
2843 lwz r0,VCORE_ENTRY_EXIT(r5)
2844 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002845 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002846
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002847 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002848
2849 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002850kvm_cede_prodded:
2851 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002852 stb r0,VCPU_PRODDED(r3)
2853 sync /* order testing prodded vs. clearing ceded */
2854 stb r0,VCPU_CEDED(r3)
2855 li r3,H_SUCCESS
2856 blr
2857
2858 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002859kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002860 ld r9, HSTATE_KVM_VCPU(r13)
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002861#ifdef CONFIG_KVM_XICS
2862 /* Abort if we still have a pending escalation */
2863 lbz r5, VCPU_XIVE_ESC_ON(r9)
2864 cmpwi r5, 0
2865 beq 1f
2866 li r0, 0
2867 stb r0, VCPU_CEDED(r9)
28681: /* Enable XIVE escalation */
2869 li r5, XIVE_ESB_SET_PQ_00
2870 mfmsr r0
2871 andi. r0, r0, MSR_DR /* in real mode? */
2872 beq 1f
2873 ld r10, VCPU_XIVE_ESC_VADDR(r9)
2874 cmpdi r10, 0
2875 beq 3f
2876 ldx r0, r10, r5
2877 b 2f
28781: ld r10, VCPU_XIVE_ESC_RADDR(r9)
2879 cmpdi r10, 0
2880 beq 3f
2881 ldcix r0, r10, r5
28822: sync
2883 li r0, 1
2884 stb r0, VCPU_XIVE_ESC_ON(r9)
2885#endif /* CONFIG_KVM_XICS */
28863: b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002887
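/*
 * Editor's sketch (not part of the original source): on the cede-exit
 * path above, a still-armed XIVE escalation cancels the cede (CEDED is
 * cleared), and the escalation interrupt is re-enabled by loading from
 * the ESB management page at the "set PQ to 00" offset - through the
 * virtual mapping when the MMU is on, or with a cache-inhibited load
 * from the real address otherwise. The structure, offset value and the
 * ld_cache_inhibited() helper below are assumptions for illustration.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_XIVE_ESB_SET_PQ_00	0xc00		/* placeholder offset */

extern uint64_t ld_cache_inhibited(const volatile void *ra);	/* ldcix */

struct xive_cede_state {
	uint8_t ceded, xive_esc_on;
	void *esc_vaddr;	/* ESB page, virtual mapping (may be NULL) */
	void *esc_raddr;	/* ESB page, real address (may be NULL) */
};

static void cede_exit_reenable_escalation(struct xive_cede_state *v, bool mmu_on)
{
	if (v->xive_esc_on)
		v->ceded = 0;		/* pending escalation: cancel the cede */

	if (mmu_on && v->esc_vaddr)
		(void)*(volatile uint64_t *)
			((char *)v->esc_vaddr + SK_XIVE_ESB_SET_PQ_00);
	else if (!mmu_on && v->esc_raddr)
		(void)ld_cache_inhibited((char *)v->esc_raddr + SK_XIVE_ESB_SET_PQ_00);
	else
		return;			/* no ESB page mapped: nothing to re-arm */

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* "sync" before marking armed */
	v->xive_esc_on = 1;
}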
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002888 /* Try to handle a machine check in real mode */
2889machine_check_realmode:
2890 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002891 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002892 nop
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002893 ld r9, HSTATE_KVM_VCPU(r13)
2894 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302895 /*
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302896	 * For a guest that is FWNMI capable, deliver all MCE errors
 2897	 * (handled and unhandled) by exiting the guest with the KVM_EXIT_NMI
 2898	 * exit reason. This approach injects machine check errors into the
 2899	 * guest address space with additional information in the form of an
 2900	 * RTAS event, enabling the guest kernel to handle such errors
 2901	 * suitably.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302902 *
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302903	 * For a guest that is not FWNMI capable (old QEMU), fall back
 2904	 * to the old behaviour for backward compatibility:
 2905	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
 2906	 * through a machine check interrupt (set HSRR0 to 0x200).
 2907	 * For handled (non-fatal) errors, just go back to guest execution
 2908	 * with the current HSRR0.
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302909	 * If we receive a machine check with MSR(RI=0), deliver it to the
 2910	 * guest as a machine check, causing the guest to crash.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302911 */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302912 ld r11, VCPU_MSR(r9)
Paul Mackerras1c9e3d52015-11-12 16:43:48 +11002913 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2914 bne mc_cont /* if so, exit to host */
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302915 /* Check if guest is capable of handling NMI exit */
2916 ld r10, VCPU_KVM(r9)
2917 lbz r10, KVM_FWNMI(r10)
2918 cmpdi r10, 1 /* FWNMI capable? */
2919 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
2920
2921 /* if not, fall through for backward compatibility. */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302922 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2923 beq 1f /* Deliver a machine check to guest */
2924 ld r10, VCPU_PC(r9)
2925 cmpdi r3, 0 /* Did we handle MCE ? */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302926 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002927 /* If not, deliver a machine check. SRR0/1 are already set */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +053029281: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Michael Neulinge4e38122014-03-25 10:47:02 +11002929 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053029302: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002931
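/*
 * Editor's sketch (not part of the original source): the real-mode
 * machine check handling above reduces to a small decision tree, shown
 * here in C. An error taken while MSR[HV] was set goes straight back to
 * the host; an FWNMI-capable guest also exits to the host (surfaced
 * later as KVM_EXIT_NMI); otherwise the legacy behaviour applies:
 * deliver a 0x200 machine check to the guest if the error was
 * unrecoverable (MSR[RI]=0) or unhandled, else resume the guest. The
 * MSR bit values and enum names are illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_MSR_HV	(1ull << 60)	/* placeholder bit positions */
#define SK_MSR_RI	(1ull << 1)

enum mc_action { MC_EXIT_TO_HOST, MC_EXIT_NMI, MC_DELIVER_TO_GUEST, MC_RESUME_GUEST };

static enum mc_action realmode_mc_action(uint64_t guest_msr, bool fwnmi_capable,
					 bool handled)
{
	if (guest_msr & SK_MSR_HV)
		return MC_EXIT_TO_HOST;		/* happened in hypervisor context */
	if (fwnmi_capable)
		return MC_EXIT_NMI;		/* host delivers KVM_EXIT_NMI */
	if (!(guest_msr & SK_MSR_RI) || !handled)
		return MC_DELIVER_TO_GUEST;	/* inject a machine check at 0x200 */
	return MC_RESUME_GUEST;			/* handled: continue the guest */
}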
Paul Mackerrasde56a942011-06-29 00:21:34 +00002932/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002933 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002934 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002935 * 0 if nothing needs to be done
2936 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11002937 * -1 if there was a guest wakeup (IPI or msgsnd)
Suresh Warriere3c13e52016-08-19 15:35:51 +10002938 * -2 if we handled a PCI passthrough interrupt (returned by
2939 * kvmppc_read_intr only)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002940 *
2941 * Also sets r12 to the interrupt vector for any interrupt that needs
2942 * to be handled now by the host (0x500 for external interrupt), or zero.
Suresh Warrier37f55d32016-08-19 15:35:46 +10002943 * Modifies all volatile registers (since it may call a C function).
2944 * This routine calls kvmppc_read_intr, a C function, if an external
2945 * interrupt is pending.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002946 */
2947kvmppc_check_wake_reason:
2948 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002949BEGIN_FTR_SECTION
2950 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2951FTR_SECTION_ELSE
2952 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2953ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2954 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10002955 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002956 li r3, 0
2957 li r12, 0
2958 cmpwi r6, 6 /* was it the decrementer? */
2959 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002960BEGIN_FTR_SECTION
2961 cmpwi r6, 5 /* privileged doorbell? */
2962 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002963 cmpwi r6, 3 /* hypervisor doorbell? */
2964 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002965END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302966 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2967 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002968 li r3, 1 /* anything else, return 1 */
29690: blr
2970
Paul Mackerras5d00f662014-01-08 21:25:28 +11002971 /* hypervisor doorbell */
29723: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302973
2974 /*
2975 * Clear the doorbell as we will invoke the handler
2976 * explicitly in the guest exit path.
2977 */
2978 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2979 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11002980 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11002981 li r3, 1
Nicholas Piggin2cde3712017-10-10 20:18:28 +10002982BEGIN_FTR_SECTION
2983 PPC_MSGSYNC
2984 lwsync
2985END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11002986 lbz r0, HSTATE_HOST_IPI(r13)
2987 cmpwi r0, 0
2988 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302989 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11002990 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11002991 blr
2992
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302993 /* Woken up due to Hypervisor maintenance interrupt */
29944: li r12, BOOK3S_INTERRUPT_HMI
2995 li r3, 1
2996 blr
2997
Suresh Warrier37f55d32016-08-19 15:35:46 +10002998 /* external interrupt - create a stack frame so we can call C */
29997: mflr r0
3000 std r0, PPC_LR_STKOFF(r1)
3001 stdu r1, -PPC_MIN_STKFRM(r1)
3002 bl kvmppc_read_intr
3003 nop
3004 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10003005 cmpdi r3, 1
3006 ble 1f
3007
3008 /*
3009 * Return code of 2 means PCI passthrough interrupt, but
3010 * we need to return back to host to complete handling the
3011 * interrupt. Trap reason is expected in r12 by guest
3012 * exit code.
3013 */
3014 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
30151:
Suresh Warrier37f55d32016-08-19 15:35:46 +10003016 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
3017 addi r1, r1, PPC_MIN_STKFRM
3018 mtlr r0
3019 blr
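/*
 * Editor's sketch (not part of the original source): the mapping that
 * kvmppc_check_wake_reason implements, from the SRR1 wake-reason field
 * to the return convention documented above. read_intr() stands in for
 * kvmppc_read_intr; the trap vector constants are placeholders.
 */
#include <stdint.h>

#define SK_TRAP_EXTERNAL	0x500	/* placeholder trap values */
#define SK_TRAP_H_DOORBELL	0xe80
#define SK_TRAP_HMI		0xe60
#define SK_TRAP_HV_RM_HARD	0x5555

extern int read_intr(void);			/* kvmppc_read_intr */
extern int host_ipi_pending(void);
extern void clear_hyp_doorbell(void);		/* PPC_MSGCLR */

/* Return value follows the convention above; *trap gets the vector (or 0). */
static int check_wake_reason(unsigned int reason, int *trap)
{
	*trap = 0;
	switch (reason) {
	case 6:				/* decrementer */
	case 5:				/* privileged doorbell (POWER8) */
		return 0;		/* nothing to do, re-enter the guest */
	case 3:				/* hypervisor doorbell */
		clear_hyp_doorbell();
		*trap = SK_TRAP_H_DOORBELL;
		return host_ipi_pending() ? 1 : -1;
	case 0xa:			/* hypervisor maintenance interrupt */
		*trap = SK_TRAP_HMI;
		return 1;
	case 8: {			/* external interrupt */
		int r = read_intr();
		*trap = (r > 1) ? SK_TRAP_HV_RM_HARD : SK_TRAP_EXTERNAL;
		return r;
	}
	default:
		return 1;		/* anything else: let the host handle it */
	}
}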
Paul Mackerrasde56a942011-06-29 00:21:34 +00003020
3021/*
3022 * Save away FP, VMX and VSX registers.
3023 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003024 * N.B. r30 and r31 are volatile across this function,
3025 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003026 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11003027kvmppc_save_fp:
3028 mflr r30
3029 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00003030 mfmsr r5
3031 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00003032#ifdef CONFIG_ALTIVEC
3033BEGIN_FTR_SECTION
3034 oris r8,r8,MSR_VEC@h
3035END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3036#endif
3037#ifdef CONFIG_VSX
3038BEGIN_FTR_SECTION
3039 oris r8,r8,MSR_VSX@h
3040END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3041#endif
3042 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003043 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003044 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003045#ifdef CONFIG_ALTIVEC
3046BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003047 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003048 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003049END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3050#endif
3051 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11003052 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11003053 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00003054 blr
3055
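/*
 * Editor's sketch (not part of the original source): kvmppc_save_fp
 * first enables MSR[FP] (plus VEC/VSX when configured) so the register
 * file is accessible, then stores the FP, VMX and VRSAVE state into the
 * vcpu; kvmppc_load_fp below is the mirror image. The MSR bit values,
 * accessor wrappers and structure layout here are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_MSR_FP	(1ull << 13)	/* placeholder bit positions */
#define SK_MSR_VEC	(1ull << 25)
#define SK_MSR_VSX	(1ull << 23)

extern uint64_t mfmsr(void);
extern void mtmsrd(uint64_t msr);
extern uint32_t read_vrsave(void);
extern void store_fp_state(void *fprs);	/* same helpers the asm calls */
extern void store_vr_state(void *vrs);

struct fp_area { uint8_t fprs[528]; uint8_t vrs[544]; uint32_t vrsave; };	/* sizes illustrative */

static void save_guest_fp(struct fp_area *v, bool have_altivec, bool have_vsx)
{
	uint64_t msr = mfmsr() | SK_MSR_FP;

	if (have_altivec)
		msr |= SK_MSR_VEC;
	if (have_vsx)
		msr |= SK_MSR_VSX;
	mtmsrd(msr);			/* make FP/VMX/VSX state accessible */

	store_fp_state(v->fprs);
	if (have_altivec)
		store_vr_state(v->vrs);
	v->vrsave = read_vrsave();	/* VRSAVE is saved unconditionally */
}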
3056/*
3057 * Load up FP, VMX and VSX registers
3058 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003059 * N.B. r30 and r31 are volatile across this function,
3060 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003061 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00003062kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11003063 mflr r30
3064 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00003065 mfmsr r9
3066 ori r8,r9,MSR_FP
3067#ifdef CONFIG_ALTIVEC
3068BEGIN_FTR_SECTION
3069 oris r8,r8,MSR_VEC@h
3070END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3071#endif
3072#ifdef CONFIG_VSX
3073BEGIN_FTR_SECTION
3074 oris r8,r8,MSR_VSX@h
3075END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3076#endif
3077 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003078 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003079 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003080#ifdef CONFIG_ALTIVEC
3081BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003082 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003083 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003084END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3085#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11003086 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00003087 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11003088 mtlr r30
3089 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00003090 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10003091
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003092#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3093/*
3094 * Save transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003095 * Called with r3 pointing to the vcpu struct and r4 containing
3096 * the guest MSR value.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003097 * This can modify all checkpointed registers, but
Simon Guo6f597c62018-05-23 15:01:48 +08003098 * restores r1 and r2 before exit.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003099 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003100kvmppc_save_tm_hv:
3101 /* See if we need to handle fake suspend mode */
3102BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08003103 b __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003104END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3105
3106 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
3107 cmpwi r0, 0
Simon Guocaa3be92018-05-23 15:01:50 +08003108 beq __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003109
3110 /* The following code handles the fake_suspend = 1 case */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003111 mflr r0
3112 std r0, PPC_LR_STKOFF(r1)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003113 stdu r1, -PPC_MIN_STKFRM(r1)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003114
3115 /* Turn on TM. */
3116 mfmsr r8
3117 li r0, 1
3118 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
3119 mtmsrd r8
3120
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003121 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
3122 beq 4f
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003123BEGIN_FTR_SECTION
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003124 bl pnv_power9_force_smt4_catch
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003125END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003126 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003127
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003128 std r1, HSTATE_HOST_R1(r13)
3129
3130 /* Clear the MSR RI since r1, r13 may be foobar. */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003131 li r5, 0
3132 mtmsrd r5, 1
3133
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003134 /* We have to treclaim here because that's the only way to do S->N */
3135 li r3, TM_CAUSE_KVM_RESCHED
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003136 TRECLAIM(R3)
3137
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003138 /*
3139 * We were in fake suspend, so we are not going to save the
3140 * register state as the guest checkpointed state (since
3141 * we already have it), therefore we can now use any volatile GPR.
3142 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003143 /* Reload PACA pointer, stack pointer and TOC. */
3144 GET_PACA(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003145 ld r1, HSTATE_HOST_R1(r13)
3146 ld r2, PACATOC(r13)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003147
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003148 /* Set MSR RI now we have r1 and r13 back. */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003149 li r5, MSR_RI
3150 mtmsrd r5, 1
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003151
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003152 HMT_MEDIUM
3153 ld r6, HSTATE_DSCR(r13)
3154 mtspr SPRN_DSCR, r6
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003155BEGIN_FTR_SECTION_NESTED(96)
3156 bl pnv_power9_force_smt4_release
3157END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
3158 nop
3159
31604:
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003161 mfspr r3, SPRN_PSSCR
3162 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
3163 li r0, PSSCR_FAKE_SUSPEND
3164 andc r3, r3, r0
3165 mtspr SPRN_PSSCR, r3
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003166
Paul Mackerras681c6172018-03-21 21:32:03 +11003167 /* Don't save TEXASR, use value from last exit in real suspend state */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003168 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003169 mfspr r5, SPRN_TFHAR
3170 mfspr r6, SPRN_TFIAR
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003171 std r5, VCPU_TFHAR(r9)
3172 std r6, VCPU_TFIAR(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003173
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003174 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003175 ld r0, PPC_LR_STKOFF(r1)
3176 mtlr r0
3177 blr
3178
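/*
 * Editor's sketch (not part of the original source): the control flow of
 * kvmppc_save_tm_hv above. It only diverges from the common
 * __kvmppc_save_tm path when the POWER9 TM "HV assist" workaround has
 * the vcpu in fake-suspend mode: then TM is forced on, treclaim is
 * issued purely to get the thread out of suspended state (the
 * checkpointed register state is already held in software), PSSCR's
 * fake-suspend bit is cleared and only TFHAR/TFIAR are saved (TEXASR is
 * kept from the last real-suspend exit). The SMT4 workaround and the
 * MSR[RI] handling are omitted; all helper names are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

extern void common_save_tm(uint64_t guest_msr);	/* __kvmppc_save_tm */
extern void force_tm_on(void);
extern void treclaim_resched(void);		/* TRECLAIM(TM_CAUSE_KVM_RESCHED) */
extern void clear_psscr_fake_suspend(void);
extern uint64_t read_tfhar(void), read_tfiar(void);

struct tm_sprs { uint64_t tfhar, tfiar; };

static void save_tm_hv(struct tm_sprs *out, uint64_t guest_msr,
		       bool hv_assist, bool fake_suspend, bool ts_suspended)
{
	if (!hv_assist || !fake_suspend) {
		common_save_tm(guest_msr);	/* the usual save path */
		return;
	}
	force_tm_on();
	if (ts_suspended)
		treclaim_resched();	/* the only way to get from S back to N */
	clear_psscr_fake_suspend();
	out->tfhar = read_tfhar();	/* checkpointed GPRs/FPRs already saved */
	out->tfiar = read_tfiar();
}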
3179/*
3180 * Restore transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003181 * Called with r3 pointing to the vcpu struct
3182 * and r4 containing the guest MSR value.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003183 * This potentially modifies all checkpointed registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003184 * It restores r1 and r2 from the PACA.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003185 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003186kvmppc_restore_tm_hv:
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003187 /*
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003188 * If we are doing TM emulation for the guest on a POWER9 DD2,
3189 * then we don't actually do a trechkpt -- we either set up
3190 * fake-suspend mode, or emulate a TM rollback.
3191 */
3192BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08003193 b __kvmppc_restore_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003194END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3195 mflr r0
3196 std r0, PPC_LR_STKOFF(r1)
3197
3198 li r0, 0
3199 stb r0, HSTATE_FAKE_SUSPEND(r13)
3200
3201 /* Turn on TM so we can restore TM SPRs */
3202 mfmsr r5
3203 li r0, 1
3204 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
3205 mtmsrd r5
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003206
3207 /*
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003208 * The user may change these outside of a transaction, so they must
3209 * always be context switched.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003210 */
Simon Guo6f597c62018-05-23 15:01:48 +08003211 ld r5, VCPU_TFHAR(r3)
3212 ld r6, VCPU_TFIAR(r3)
3213 ld r7, VCPU_TEXASR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003214 mtspr SPRN_TFHAR, r5
3215 mtspr SPRN_TFIAR, r6
3216 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003217
Simon Guo6f597c62018-05-23 15:01:48 +08003218 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003219 beqlr /* TM not active in guest */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003220
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003221 /* Make sure the failure summary is set */
3222 oris r7, r7, (TEXASR_FS)@h
3223 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003224
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003225 cmpwi r5, 1 /* check for suspended state */
3226 bgt 10f
3227 stb r5, HSTATE_FAKE_SUSPEND(r13)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003228 b 9f /* and return */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100322910: stdu r1, -PPC_MIN_STKFRM(r1)
3230 /* guest is in transactional state, so simulate rollback */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003231 bl kvmhv_emulate_tm_rollback
3232 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003233 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerras7b0e8272018-05-30 20:07:52 +100032349: ld r0, PPC_LR_STKOFF(r1)
3235 mtlr r0
3236 blr
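/*
 * Editor's sketch (not part of the original source): the decision made
 * by kvmppc_restore_tm_hv above. Without the TM HV assist it falls
 * through to __kvmppc_restore_tm; with it, no trechkpt is ever issued:
 * the TM SPRs are restored, and if the guest was in suspended state
 * fake-suspend mode is armed, while a transactional guest state is
 * handled by emulating a TM rollback. Constant values and helper names
 * are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_TEXASR_FS	(1ull << 57)	/* failure summary, placeholder bit */

enum ts_state { TS_NONE = 0, TS_SUSPENDED = 1, TS_TRANSACTIONAL = 2 };

extern void common_restore_tm(uint64_t guest_msr);	/* __kvmppc_restore_tm */
extern void write_tm_sprs(uint64_t tfhar, uint64_t tfiar, uint64_t texasr);
extern void emulate_tm_rollback(void);			/* kvmhv_emulate_tm_rollback */

static void restore_tm_hv(uint64_t guest_msr, enum ts_state ts, bool hv_assist,
			  uint64_t tfhar, uint64_t tfiar, uint64_t texasr,
			  bool *fake_suspend)
{
	if (!hv_assist) {
		common_restore_tm(guest_msr);
		return;
	}
	*fake_suspend = false;
	if (ts != TS_NONE)
		texasr |= SK_TEXASR_FS;		/* failure summary must be set */
	write_tm_sprs(tfhar, tfiar, texasr);	/* always context-switched */
	if (ts == TS_NONE)
		return;				/* TM not active in the guest */
	if (ts == TS_SUSPENDED)
		*fake_suspend = true;		/* just pretend to be suspended */
	else
		emulate_tm_rollback();		/* transactional: roll back */
}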
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003237#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003238
Paul Mackerras44a3add2013-10-04 21:45:04 +10003239/*
3240 * We come here if we get any exception or interrupt while we are
3241 * executing host real mode code while in guest MMU context.
Paul Mackerras857b99e2017-09-01 16:17:27 +10003242 * r12 is (CR << 32) | vector
3243 * r13 points to our PACA
3244 * r12 is saved in HSTATE_SCRATCH0(r13)
3245 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3246 * r9 is saved in HSTATE_SCRATCH2(r13)
3247 * r13 is saved in HSPRG1
3248 * cfar is saved in HSTATE_CFAR(r13)
3249 * ppr is saved in HSTATE_PPR(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10003250 */
3251kvmppc_bad_host_intr:
Paul Mackerras857b99e2017-09-01 16:17:27 +10003252 /*
3253 * Switch to the emergency stack, but start half-way down in
3254 * case we were already on it.
3255 */
3256 mr r9, r1
3257 std r1, PACAR1(r13)
3258 ld r1, PACAEMERGSP(r13)
3259 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3260 std r9, 0(r1)
3261 std r0, GPR0(r1)
3262 std r9, GPR1(r1)
3263 std r2, GPR2(r1)
3264 SAVE_4GPRS(3, r1)
3265 SAVE_2GPRS(7, r1)
3266 srdi r0, r12, 32
3267 clrldi r12, r12, 32
3268 std r0, _CCR(r1)
3269 std r12, _TRAP(r1)
3270 andi. r0, r12, 2
3271 beq 1f
3272 mfspr r3, SPRN_HSRR0
3273 mfspr r4, SPRN_HSRR1
3274 mfspr r5, SPRN_HDAR
3275 mfspr r6, SPRN_HDSISR
3276 b 2f
32771: mfspr r3, SPRN_SRR0
3278 mfspr r4, SPRN_SRR1
3279 mfspr r5, SPRN_DAR
3280 mfspr r6, SPRN_DSISR
32812: std r3, _NIP(r1)
3282 std r4, _MSR(r1)
3283 std r5, _DAR(r1)
3284 std r6, _DSISR(r1)
3285 ld r9, HSTATE_SCRATCH2(r13)
3286 ld r12, HSTATE_SCRATCH0(r13)
3287 GET_SCRATCH0(r0)
3288 SAVE_4GPRS(9, r1)
3289 std r0, GPR13(r1)
3290 SAVE_NVGPRS(r1)
3291 ld r5, HSTATE_CFAR(r13)
3292 std r5, ORIG_GPR3(r1)
3293 mflr r3
3294#ifdef CONFIG_RELOCATABLE
3295 ld r4, HSTATE_SCRATCH1(r13)
3296#else
3297 mfctr r4
3298#endif
3299 mfxer r5
Madhavan Srinivasan4e26bc42017-12-20 09:25:50 +05303300 lbz r6, PACAIRQSOFTMASK(r13)
Paul Mackerras857b99e2017-09-01 16:17:27 +10003301 std r3, _LINK(r1)
3302 std r4, _CTR(r1)
3303 std r5, _XER(r1)
3304 std r6, SOFTE(r1)
3305 ld r2, PACATOC(r13)
3306 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3307 std r3, STACK_FRAME_OVERHEAD-16(r1)
3308
3309 /*
3310 * On POWER9 do a minimal restore of the MMU and call C code,
3311 * which will print a message and panic.
3312 * XXX On POWER7 and POWER8, we just spin here since we don't
3313 * know what the other threads are doing (and we don't want to
3314 * coordinate with them) - but at least we now have register state
3315 * in memory that we might be able to look at from another CPU.
3316 */
3317BEGIN_FTR_SECTION
Paul Mackerras44a3add2013-10-04 21:45:04 +10003318 b .
Paul Mackerras857b99e2017-09-01 16:17:27 +10003319END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3320 ld r9, HSTATE_KVM_VCPU(r13)
3321 ld r10, VCPU_KVM(r9)
3322
3323 li r0, 0
3324 mtspr SPRN_AMR, r0
3325 mtspr SPRN_IAMR, r0
3326 mtspr SPRN_CIABR, r0
3327 mtspr SPRN_DAWRX, r0
3328
Paul Mackerras857b99e2017-09-01 16:17:27 +10003329BEGIN_MMU_FTR_SECTION
3330 b 4f
3331END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3332
3333 slbmte r0, r0
3334 slbia
3335 ptesync
3336 ld r8, PACA_SLBSHADOWPTR(r13)
3337 .rept SLB_NUM_BOLTED
3338 li r3, SLBSHADOW_SAVEAREA
3339 LDX_BE r5, r8, r3
3340 addi r3, r3, 8
3341 LDX_BE r6, r8, r3
3342 andis. r7, r5, SLB_ESID_V@h
3343 beq 3f
3344 slbmte r6, r5
33453: addi r8, r8, 16
3346 .endr
3347
33484: lwz r7, KVM_HOST_LPID(r10)
3349 mtspr SPRN_LPID, r7
3350 mtspr SPRN_PID, r0
3351 ld r8, KVM_HOST_LPCR(r10)
3352 mtspr SPRN_LPCR, r8
3353 isync
3354 li r0, KVM_GUEST_MODE_NONE
3355 stb r0, HSTATE_IN_GUEST(r13)
3356
3357 /*
3358 * Turn on the MMU and jump to C code
3359 */
3360 bcl 20, 31, .+4
33615: mflr r3
3362 addi r3, r3, 9f - 5b
Nicholas Piggineadce3b2018-05-18 03:49:43 +10003363 li r4, -1
3364 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
Paul Mackerras857b99e2017-09-01 16:17:27 +10003365 ld r4, PACAKMSR(r13)
3366 mtspr SPRN_SRR0, r3
3367 mtspr SPRN_SRR1, r4
Nicholas Piggin222f20f2018-01-10 03:07:15 +11003368 RFI_TO_KERNEL
Paul Mackerras857b99e2017-09-01 16:17:27 +100033699: addi r3, r1, STACK_FRAME_OVERHEAD
3370 bl kvmppc_bad_interrupt
3371 b 9b
Michael Neulinge4e38122014-03-25 10:47:02 +11003372
3373/*
3374 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3375 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3376 * r11 has the guest MSR value (in/out)
3377 * r9 has a vcpu pointer (in)
3378 * r0 is used as a scratch register
3379 */
3380kvmppc_msr_interrupt:
3381 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3382 cmpwi r0, 2 /* Check if we are in transactional state.. */
3383 ld r11, VCPU_INTR_MSR(r9)
3384 bne 1f
3385 /* ... if transactional, change to suspended */
3386 li r0, 1
33871: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3388 blr
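/*
 * Editor's sketch (not part of the original source): what
 * kvmppc_msr_interrupt computes. The new MSR comes from VCPU_INTR_MSR,
 * with the transaction-state field carried over from the old MSR except
 * that "transactional" is demoted to "suspended", as interrupt delivery
 * requires. The TS field position used here is illustrative.
 */
#include <stdint.h>

#define SK_MSR_TS_LG	33			/* TS field low bit, illustrative */
#define SK_MSR_TS_MASK	(3ull << SK_MSR_TS_LG)

static uint64_t msr_on_interrupt(uint64_t old_msr, uint64_t intr_msr)
{
	uint64_t ts = (old_msr & SK_MSR_TS_MASK) >> SK_MSR_TS_LG;

	if (ts == 2)		/* transactional ... */
		ts = 1;		/* ... becomes suspended */
	return (intr_msr & ~SK_MSR_TS_MASK) | (ts << SK_MSR_TS_LG);
}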
Paul Mackerras9bc01a92014-05-26 19:48:40 +10003389
3390/*
3391 * This works around a hardware bug on POWER8E processors, where
3392 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3393 * performance monitor interrupt. Instead, when we need to have
3394 * an interrupt pending, we have to arrange for a counter to overflow.
3395 */
3396kvmppc_fix_pmao:
3397 li r3, 0
3398 mtspr SPRN_MMCR2, r3
3399 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3400 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3401 mtspr SPRN_MMCR0, r3
3402 lis r3, 0x7fff
3403 ori r3, r3, 0xffff
3404 mtspr SPRN_PMC6, r3
3405 isync
3406 blr
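/*
 * Editor's sketch (not part of the original source): the register values
 * kvmppc_fix_pmao programs. Because writing MMCR0[PMAO] on POWER8E does
 * not raise the interrupt, PMC6 is instead set one count short of
 * overflow with overflow exceptions (PMXE) and freeze-on-event (FCECE)
 * enabled, so a performance monitor interrupt becomes pending almost
 * immediately. The MMCR0 bit values and SPR wrappers are placeholders.
 */
#include <stdint.h>

#define SK_MMCR0_PMXE	0x04000000u	/* placeholder bit values */
#define SK_MMCR0_FCECE	0x02000000u
#define SK_MMCR0_PMCjCE	0x00004000u
#define SK_MMCR0_C56RUN	0x00000100u

extern void write_mmcr0(uint32_t), write_mmcr2(uint64_t), write_pmc6(uint32_t);

static void fix_pmao(void)
{
	write_mmcr2(0);
	write_mmcr0(SK_MMCR0_PMXE | SK_MMCR0_FCECE |
		    SK_MMCR0_PMCjCE | SK_MMCR0_C56RUN);
	write_pmc6(0x7fffffff);	/* next increment overflows -> PMI pending */
}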
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003407
3408#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3409/*
3410 * Start timing an activity
3411 * r3 = pointer to time accumulation struct, r4 = vcpu
3412 */
3413kvmhv_start_timing:
3414 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003415 ld r6, VCORE_TB_OFFSET_APPL(r5)
3416 mftb r5
3417 subf r5, r6, r5 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003418 std r3, VCPU_CUR_ACTIVITY(r4)
3419 std r5, VCPU_ACTIVITY_START(r4)
3420 blr
3421
3422/*
3423 * Accumulate time to one activity and start another.
3424 * r3 = pointer to new time accumulation struct, r4 = vcpu
3425 */
3426kvmhv_accumulate_time:
3427 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003428 ld r8, VCORE_TB_OFFSET_APPL(r5)
3429 ld r5, VCPU_CUR_ACTIVITY(r4)
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003430 ld r6, VCPU_ACTIVITY_START(r4)
3431 std r3, VCPU_CUR_ACTIVITY(r4)
3432 mftb r7
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003433 subf r7, r8, r7 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003434 std r7, VCPU_ACTIVITY_START(r4)
3435 cmpdi r5, 0
3436 beqlr
3437 subf r3, r6, r7
3438 ld r8, TAS_SEQCOUNT(r5)
3439 cmpdi r8, 0
3440 addi r8, r8, 1
3441 std r8, TAS_SEQCOUNT(r5)
3442 lwsync
3443 ld r7, TAS_TOTAL(r5)
3444 add r7, r7, r3
3445 std r7, TAS_TOTAL(r5)
3446 ld r6, TAS_MIN(r5)
3447 ld r7, TAS_MAX(r5)
3448 beq 3f
3449 cmpd r3, r6
3450 bge 1f
34513: std r3, TAS_MIN(r5)
34521: cmpd r3, r7
3453 ble 2f
3454 std r3, TAS_MAX(r5)
34552: lwsync
3456 addi r8, r8, 1
3457 std r8, TAS_SEQCOUNT(r5)
3458 blr
3459#endif
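/*
 * Editor's sketch (not part of the original source): the accumulation
 * performed by kvmhv_accumulate_time above. The elapsed time of the
 * activity being closed out is added to its running total and folded
 * into its min/max, with the update bracketed by seqcount increments
 * (odd while in progress) so a reader can detect a torn update.
 * Timestamps are in host timebase; struct and field names are
 * assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

struct time_accum {		/* mirrors seqcount/total/min/max above */
	uint64_t seqcount, total, min, max;
};

struct activity_state {
	struct time_accum *cur;	/* activity currently being timed */
	uint64_t start;		/* host-TB timestamp when it began */
};

static void accumulate_time(struct activity_state *s, struct time_accum *next,
			    uint64_t now_host_tb)
{
	struct time_accum *prev = s->cur;
	uint64_t delta = now_host_tb - s->start;
	bool first;

	s->cur = next;
	s->start = now_host_tb;
	if (!prev)
		return;

	first = (prev->seqcount == 0);
	prev->seqcount++;				/* odd: update in progress */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	prev->total += delta;
	if (first || delta < prev->min)			/* first sample sets min */
		prev->min = delta;
	if (delta > prev->max)
		prev->max = delta;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	prev->seqcount++;				/* even: consistent again */
}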