/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR0	(SFS-56)
#define STACK_SLOT_DAWRX0	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
#define STACK_SLOT_FSCR		(SFS-96)

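/*
 * Note: all of these slots live inside the SFS-byte stack frame that
 * kvmppc_hv_entry creates below with "stdu r1, -SFS(r1)"; each
 * STACK_SLOT_* is simply a fixed offset from the new r1 (the low end
 * of that frame).
 */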
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

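/*
 * A note on the trampoline above: SRR0 is pointed at kvmppc_call_hv_entry
 * and SRR1 is the current MSR with RI, IR and DR cleared, so the
 * RFI_TO_KERNEL drops us into kvmppc_call_hv_entry in real mode
 * (MMU off), which is the state kvmppc_hv_entry expects.
 */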
kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

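	/*
	 * In other words, HSTATE_DECEXP holds the host decrementer expiry
	 * as a timebase value, so the DEC written above is simply
	 * (expiry - current timebase).
	 */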
	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1		/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

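/*
 * Note on napping_threads, used above: it is a per-vcore bitmask with
 * one bit per hardware thread (1 << ptid).  The lwarx/stwcx. loop sets
 * our bit atomically before napping, and kvm_novcpu_wakeup clears it
 * again with the same retry pattern.
 */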
/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	extsw	r0, r0
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from Linux offline idle code.
 * Relocation is off
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	mfcr	r5
	mflr	r0
	std	r5, 8(r1)	// Save CR in caller's frame
	std	r0, 16(r1)	// Save LR in caller's frame
	// Create frame on emergency stack
	ld	r4, PACAEMERGSP(r13)
	stdu	r1, -SWITCH_FRAME_SIZE(r4)
	// Switch to new frame on emergency stack
	mr	r1, r4
	std	r3, 32(r1)	// Save SRR1 wakeup value
	SAVE_NVGPRS(r1)

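	/*
	 * Frame layout assumed above and by the return path in
	 * kvm_no_guest: CR and LR are saved at 8(r1) and 16(r1) of the
	 * caller's frame, and the SRR1 wakeup value at 32(r1) of the new
	 * frame on the emergency stack.
	 */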
	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	// About to go to guest, clear saved SRR1
	li	r0, 0
	std	r0, 32(r1)

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	// Return SRR1 wakeup value, or 0 if we went into the guest
	ld	r3, 32(r1)
	REST_NVGPRS(r1)
	ld	r1, 0(r1)	// Switch back to caller stack
	ld	r0, 16(r1)	// Reload LR
	ld	r5, 8(r1)	// Reload CR
	mtlr	r0
	mtcr	r5
	blr

53:
	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, the HMI is ignored even though the subcores
	 * have already exited the guest. Hence the HMI keeps waking the
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

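	/*
	 * Note on the LPCR update above: the constant is pre-shifted
	 * right by 4 so it fits an immediate, and the rlwimi rotates it
	 * back into place while replacing the four PECE bits, i.e. it
	 * sets PECEDH and PECE0 and clears PECEDP and PECE1 before we
	 * enter the nap sequence.
	 */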
57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

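	/*
	 * The 0x100 comparison above works because entering threads set
	 * bits in the low byte of entry_exit_map while exiting threads
	 * set bits above it, so a value >= 0x100 means at least one
	 * thread has already started to exit and we must not join.
	 */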
	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

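	/*
	 * TBU40 only writes the upper 40 bits of the timebase, so the
	 * sequence above re-reads the timebase afterwards and, if the
	 * low 24 bits wrapped while the offset was being applied, bumps
	 * the upper part by one unit (the addis of 0x100, i.e. 1 << 24)
	 * and writes TBU40 again.
	 */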
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r7, r6
	beq	38f
	or	r7, r7, r6
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

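	/*
	 * The yield count lives in the guest's lppaca, which is a
	 * big-endian structure, hence the LWZX_BE/STWX_BE accessors
	 * above instead of plain lwz/stw.
	 */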
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR0
	mfspr	r7, SPRN_DAWRX0
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR0(r1)
	std	r7, STACK_SLOT_DAWRX0(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_FSCR
	std	r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR0(r4)
	ld	r6, VCPU_DAWRX0(r4)
	mtspr	SPRN_DAWR0, r5
	mtspr	SPRN_DAWRX0, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	extsw	r3, r3
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

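	/*
	 * The 512 above is in timebase ticks; with the usual 512MHz
	 * timebase that is roughly one microsecond, so if HDEC is about
	 * to fire we bail out to hdec_soon rather than enter the guest
	 * for a negligible slice.
	 */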
	/* Clear out and reload the SLB */
	li	r6, 0
	slbmte	r6, r6
	PPC_SLBIA(6)
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

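	/*
	 * The rldicl/rotldi pair above is a branch-free way to clear
	 * MSR_HV: rotate the MSR so the HV bit lands in the top bit,
	 * drop it via the rldicl mask, then rotate everything back into
	 * place; MSR_ME is then forced on so the guest always runs with
	 * machine check interrupts enabled.
	 */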
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	mtlr	r5

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)

	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R9		= HSTATE_IN_GUEST
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest R13 saved in SPRN_SCRATCH0
	 * guest R9 saved in HSTATE_SCRATCH2
	 */
	/* We're now back in the host but in guest MMU context */
	cmpwi	r9,KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar	r3
	mfdsisr	r4
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	extsw	r3, r3
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	maybe_reenter_guest
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	beq	hmi_realmode

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	/*
	 * Possibly flush the link stack here, before we do a blr in
	 * kvmhv_switch_to_host.
	 */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack

	/* For hash guest, read the guest SLB and save it away */
	li	r5, 0
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	PPC_SLBIA(6)
	ptesync
	stw	r5,VCPU_SLB_MAX(r9)

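	/*
	 * In short, the loop above walks every SLB slot with
	 * slbmfee/slbmfev, copies only the valid entries into the vcpu's
	 * SLB shadow array, records how many it found in VCPU_SLB_MAX,
	 * and then wipes the SLB before the bolted host entries are
	 * reloaded below.
	 */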
	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

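	/*
	 * I.e. the guest DEC expiry computed above is (remaining DEC +
	 * current TB) in guest-timebase terms; subtracting the applied
	 * timebase offset converts it to a host timebase value before it
	 * is stashed in dec_expires.
	 */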
Paul Mackerras6af27c82015-03-28 14:21:10 +11001203 /* Increment exit count, poke other threads to exit */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001204 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001205 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001206 nop
1207 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001208
Paul Mackerrasec257162015-06-24 21:18:03 +10001209 /* Stop others sending VCPU interrupts to this physical CPU */
1210 li r0, -1
1211 stw r0, VCPU_CPU(r9)
1212 stw r0, VCPU_THREAD_CPU(r9)
1213
Paul Mackerrasde56a942011-06-29 00:21:34 +00001214 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001215 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001216 stw r6,VCPU_CTRL(r9)
1217 andi. r0,r6,1
1218 bne 4f
1219 ori r6,r6,1
1220 mtspr SPRN_CTRLT,r6
12214:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001222 /*
1223 * Save the guest PURR/SPURR
1224 */
1225 mfspr r5,SPRN_PURR
1226 mfspr r6,SPRN_SPURR
1227 ld r7,VCPU_PURR(r9)
1228 ld r8,VCPU_SPURR(r9)
1229 std r5,VCPU_PURR(r9)
1230 std r6,VCPU_SPURR(r9)
1231 subf r5,r7,r5
1232 subf r6,r8,r6
1233
1234 /*
1235 * Restore host PURR/SPURR and add guest times
1236 * so that the time in the guest gets accounted.
1237 */
1238 ld r3,HSTATE_PURR(r13)
1239 ld r4,HSTATE_SPURR(r13)
1240 add r3,r3,r5
1241 add r4,r4,r6
1242 mtspr SPRN_PURR,r3
1243 mtspr SPRN_SPURR,r4
1244
Michael Neulingb005255e2014-01-08 21:25:21 +11001245BEGIN_FTR_SECTION
1246 b 8f
1247END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001248 /* Save POWER8-specific registers */
1249 mfspr r5, SPRN_IAMR
1250 mfspr r6, SPRN_PSPB
1251 mfspr r7, SPRN_FSCR
1252 std r5, VCPU_IAMR(r9)
1253 stw r6, VCPU_PSPB(r9)
1254 std r7, VCPU_FSCR(r9)
1255 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001256 mfspr r7, SPRN_TAR
1257 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001258 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001259 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001260 std r8, VCPU_EBBHR(r9)
1261 mfspr r5, SPRN_EBBRR
1262 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001263 mfspr r7, SPRN_PID
1264 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001265 std r5, VCPU_EBBRR(r9)
1266 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001267 stw r7, VCPU_GUEST_PID(r9)
1268 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001269 mfspr r5, SPRN_TCSCR
1270 mfspr r6, SPRN_ACOP
1271 mfspr r7, SPRN_CSIGR
1272 mfspr r8, SPRN_TACR
1273 std r5, VCPU_TCSCR(r9)
1274 std r6, VCPU_ACOP(r9)
1275 std r7, VCPU_CSIGR(r9)
1276 std r8, VCPU_TACR(r9)
Nicholas Piggin6ba53312021-05-26 22:58:51 +10001277BEGIN_FTR_SECTION
1278 ld r5, STACK_SLOT_FSCR(r1)
1279 mtspr SPRN_FSCR, r5
1280END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001281 /*
1282 * Restore various registers to 0, where non-zero values
1283 * set by the guest could disrupt the host.
1284 */
1285 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001286 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001287 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001288 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001289 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1290 li r0, 1
1291 sldi r0, r0, 31
1292 mtspr SPRN_MMCRS, r0
Michael Neulingb005255e2014-01-08 21:25:21 +11001293
Michael Ellermanc3c7470c2019-02-22 13:22:08 +11001294 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
1295 ld r8, STACK_SLOT_IAMR(r1)
1296 mtspr SPRN_IAMR, r8
1297
12988: /* Power7 jumps back in here */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001299 mfspr r5,SPRN_AMR
1300 mfspr r6,SPRN_UAMOR
1301 std r5,VCPU_AMR(r9)
1302 std r6,VCPU_UAMOR(r9)
Michael Ellermanc3c7470c2019-02-22 13:22:08 +11001303 ld r5,STACK_SLOT_AMR(r1)
1304 ld r6,STACK_SLOT_UAMOR(r1)
1305 mtspr SPRN_AMR, r5
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001306 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001307
Paul Mackerrasde56a942011-06-29 00:21:34 +00001308 /* Switch DSCR back to host value */
1309 mfspr r8, SPRN_DSCR
1310 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001311 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001312 mtspr SPRN_DSCR, r7
1313
1314 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001315 std r14, VCPU_GPR(R14)(r9)
1316 std r15, VCPU_GPR(R15)(r9)
1317 std r16, VCPU_GPR(R16)(r9)
1318 std r17, VCPU_GPR(R17)(r9)
1319 std r18, VCPU_GPR(R18)(r9)
1320 std r19, VCPU_GPR(R19)(r9)
1321 std r20, VCPU_GPR(R20)(r9)
1322 std r21, VCPU_GPR(R21)(r9)
1323 std r22, VCPU_GPR(R22)(r9)
1324 std r23, VCPU_GPR(R23)(r9)
1325 std r24, VCPU_GPR(R24)(r9)
1326 std r25, VCPU_GPR(R25)(r9)
1327 std r26, VCPU_GPR(R26)(r9)
1328 std r27, VCPU_GPR(R27)(r9)
1329 std r28, VCPU_GPR(R28)(r9)
1330 std r29, VCPU_GPR(R29)(r9)
1331 std r30, VCPU_GPR(R30)(r9)
1332 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001333
1334 /* Save SPRGs */
1335 mfspr r3, SPRN_SPRG0
1336 mfspr r4, SPRN_SPRG1
1337 mfspr r5, SPRN_SPRG2
1338 mfspr r6, SPRN_SPRG3
1339 std r3, VCPU_SPRG0(r9)
1340 std r4, VCPU_SPRG1(r9)
1341 std r5, VCPU_SPRG2(r9)
1342 std r6, VCPU_SPRG3(r9)
1343
Paul Mackerras89436332012-03-02 01:38:23 +00001344 /* save FP state */
1345 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001346 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001347
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001348#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1349BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001350 b 91f
Nicholas Pigginfae5c9f2021-05-28 19:07:52 +10001351END_FTR_SECTION_IFCLR(CPU_FTR_TM)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001352 /*
Paul Mackerras7854f752018-10-08 16:30:53 +11001353 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001354 */
Simon Guo6f597c62018-05-23 15:01:48 +08001355 mr r3, r9
1356 ld r4, VCPU_MSR(r3)
Paul Mackerras7854f752018-10-08 16:30:53 +11001357 li r5, 0 /* don't preserve non-vol regs */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10001358 bl kvmppc_save_tm_hv
Paul Mackerras7854f752018-10-08 16:30:53 +11001359 nop
Simon Guo6f597c62018-05-23 15:01:48 +08001360 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100136191:
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001362#endif
1363
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001364 /* Increment yield count if they have a VPA */
1365 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1366 cmpdi r8, 0
1367 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001368 li r4, LPPACA_YIELDCOUNT
1369 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001370 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001371 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001372 li r3, 1
1373 stb r3, VCPU_VPA_DIRTY(r9)
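A minimal C sketch of the yield-count bump above; the struct layout and the byte-swap builtin are illustrative assumptions (the asm uses the LWZX_BE/STWX_BE accessors because the lppaca field is big-endian).

#include <stdint.h>

struct lppaca_sketch {
	uint32_t yield_count_be;	/* stored big-endian in the VPA */
};

static void bump_yield_count(struct lppaca_sketch *vpa, uint8_t *vpa_dirty)
{
	uint32_t yc = __builtin_bswap32(vpa->yield_count_be);	/* assumes an LE host */
	vpa->yield_count_be = __builtin_bswap32(yc + 1);
	*vpa_dirty = 1;		/* mirrors "stb r3, VCPU_VPA_DIRTY(r9)" */
}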
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000137425:
1375 /* Save PMU registers if requested */
1376 /* r8 and cr0.eq are live here */
Paul Mackerras41f4e632018-10-08 16:30:51 +11001377 mr r3, r9
1378 li r4, 1
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001379 beq 21f /* if no VPA, save PMU stuff anyway */
Paul Mackerras41f4e632018-10-08 16:30:51 +11001380 lbz r4, LPPACA_PMCINUSE(r8)
138121: bl kvmhv_save_guest_pmu
1382 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001383
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001384 /* Restore host values of some registers */
1385BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001386 ld r5, STACK_SLOT_CIABR(r1)
Ravi Bangoria122954ed72020-12-16 16:12:17 +05301387 ld r6, STACK_SLOT_DAWR0(r1)
1388 ld r7, STACK_SLOT_DAWRX0(r1)
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001389 mtspr SPRN_CIABR, r5
Michael Neulingb53221e2018-03-27 15:37:22 +11001390 /*
1391 * If the DAWR doesn't work, it's ok to write these here as
1392 * the values should always be zero
1393 */
Ravi Bangoria09f82b02020-05-14 16:47:26 +05301394 mtspr SPRN_DAWR0, r6
1395 mtspr SPRN_DAWRX0, r7
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001396END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Nicholas Piggindc462262020-08-25 17:55:35 +10001397
1398 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001399 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001400 * We don't have to lock against tlbies but we do
1401 * have to coordinate the hardware threads.
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001402 * Here STACK_SLOT_TRAP(r1) contains the trap number.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001403 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001404kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001405 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001406 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001407 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1408 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001409 cmpwi r3,0
1410 beq 15f
1411 HMT_LOW
141213: lbz r3,VCORE_IN_GUEST(r5)
1413 cmpwi r3,0
1414 bne 13b
1415 HMT_MEDIUM
1416 b 16f
1417
1418 /* Primary thread waits for all the secondaries to exit guest */
141915: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001420 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001421 clrldi r3,r3,56
1422 cmpw r3,r0
1423 bne 15b
1424 isync
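Roughly, VCORE_ENTRY_EXIT packs two byte-sized counters and the primary spins until they agree; a C sketch of the test (field packing inferred from the mask and shift in the asm):

#include <stdint.h>

static int all_threads_exited(uint32_t entry_exit)
{
	uint32_t entered = entry_exit & 0xff;		/* clrldi r3,r3,56 */
	uint32_t exited  = (entry_exit >> 8) & 0xff;	/* rlwinm r0,r3,32-8,0xff */
	return exited == entered;
}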
1425
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001426 /* Did we actually switch to the guest at all? */
1427 lbz r6, VCORE_IN_GUEST(r5)
1428 cmpwi r6, 0
1429 beq 19f
1430
Paul Mackerrasde56a942011-06-29 00:21:34 +00001431 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001432 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11001433 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001434 li r8,LPID_RSVD /* switch to reserved LPID */
1435 mtspr SPRN_LPID,r8
1436 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11001437 mtspr SPRN_SDR1,r6 /* switch to host page table */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001438 mtspr SPRN_LPID,r7
1439 isync
1440
Michael Neulingb005255e2014-01-08 21:25:21 +11001441BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001442 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11001443 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001444 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11001445 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001446 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11001447 /* clear DPDES so we don't get guest doorbells in the host */
1448 li r8, 0
1449 mtspr SPRN_DPDES, r8
1450END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1451
Paul Mackerrasde56a942011-06-29 00:21:34 +00001452 /* Subtract timebase offset from timebase */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001453 ld r8, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001454 cmpdi r8,0
1455 beq 17f
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001456 li r0, 0
1457 std r0, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001458 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001459 subf r8,r8,r6
1460 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1461 mftb r7 /* check if lower 24 bits overflowed */
1462 clrldi r6,r6,40
1463 clrldi r7,r7,40
1464 cmpld r7,r6
1465 bge 17f
1466 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1467 mtspr SPRN_TBU40,r8
1468
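The re-check above guards against the low 24 bits of the timebase wrapping while the upper 40 bits are being rewritten; a hedged C sketch of that decision (tb_before/tb_after stand for the two mftb samples):

#include <stdint.h>

static uint64_t tbu40_fixup(uint64_t upper40, uint64_t tb_before, uint64_t tb_after)
{
	/* if the low 24 bits went backwards, a carry was lost during the update */
	if ((tb_after & 0xffffff) < (tb_before & 0xffffff))
		upper40 += 1ull << 24;		/* addis r8,r8,0x100 */
	return upper40;				/* written back via SPRN_TBU40 */
}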
Paul Mackerrasdf709a22018-10-08 16:30:52 +1100146917:
1470 /*
1471 * If this is an HMI, we called kvmppc_realmode_hmi_handler
1472 * above, which may or may not have already called
1473 * kvmppc_subcore_exit_guest. Fortunately, all that
1474 * kvmppc_subcore_exit_guest does is clear a flag, so calling
1475 * it again here is benign even if kvmppc_realmode_hmi_handler
1476 * has already called it.
1477 */
1478 bl kvmppc_subcore_exit_guest
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301479 nop
148030: ld r5,HSTATE_KVM_VCORE(r13)
1481 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1482
Paul Mackerrasde56a942011-06-29 00:21:34 +00001483 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301484 ld r0, VCORE_PCR(r5)
Jordan Niethe13c7bb32019-09-17 10:46:05 +10001485 LOAD_REG_IMMEDIATE(r6, PCR_MASK)
1486 cmpld r0, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001487 beq 18f
Jordan Niethe13c7bb32019-09-17 10:46:05 +10001488 mtspr SPRN_PCR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +0000148918:
1490 /* Signal secondary CPUs to continue */
Jordan Niethe7fe4e112019-10-04 12:53:17 +10001491 li r0, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00001492 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000149319: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001494 mtspr SPRN_HDEC,r8
1495
Nicholas Pigginb1b16972021-01-18 16:28:06 +1000149616: ld r8,KVM_HOST_LPCR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001497 mtspr SPRN_LPCR,r8
1498 isync
Nicholas Pigginb1b16972021-01-18 16:28:06 +10001499
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001500#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1501 /* Finish timing, if we have a vcpu */
1502 ld r4, HSTATE_KVM_VCPU(r13)
1503 cmpdi r4, 0
1504 li r3, 0
1505 beq 2f
1506 bl kvmhv_accumulate_time
15072:
1508#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00001509 /* Unset guest mode */
1510 li r0, KVM_GUEST_MODE_NONE
1511 stb r0, HSTATE_IN_GUEST(r13)
1512
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001513 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001514 ld r0, SFS+PPC_LR_STKOFF(r1)
1515 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10001516 mtlr r0
1517 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001518
Michael Ellermanaf2e8c62019-11-13 21:05:44 +11001519.balign 32
1520.global kvm_flush_link_stack
1521kvm_flush_link_stack:
1522 /* Save LR into r0 */
1523 mflr r0
1524
1525 /* Flush the link stack. On Power8 it's up to 32 entries in size. */
1526 .rept 32
1527 bl .+4
1528 .endr
1529
1530 /* And on Power9 it's up to 64. */
1531BEGIN_FTR_SECTION
1532 .rept 32
1533 bl .+4
1534 .endr
1535END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1536
1537 /* Restore LR */
1538 mtlr r0
1539 blr
1540
Paul Mackerrasdf709a22018-10-08 16:30:52 +11001541kvmppc_guest_external:
1542 /* External interrupt, first check for host_ipi. If this is
1543 * set, we know the host wants us out so let's do it now
1544 */
1545 bl kvmppc_read_intr
1546
1547 /*
1548 * Restore the active volatile registers after returning from
1549 * a C function.
1550 */
1551 ld r9, HSTATE_KVM_VCPU(r13)
1552 li r12, BOOK3S_INTERRUPT_EXTERNAL
1553
1554 /*
1555 * kvmppc_read_intr return codes:
1556 *
1557 * Exit to host (r3 > 0)
1558 * 1 An interrupt is pending that needs to be handled by the host
1559 * Exit guest and return to host by branching to guest_exit_cont
1560 *
1561 * 2 Passthrough that needs completion in the host
1562 * Exit guest and return to host by branching to guest_exit_cont
1563 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1564 * to indicate to the host to complete handling the interrupt
1565 *
1566 * Before returning to guest, we check if any CPU is heading out
1567 * to the host and if so, we head out also. If no CPUs are heading
1568 * out, we check the return values (<= 0) below.
1569 *
1570 * Return to guest (r3 <= 0)
1571 * 0 No external interrupt is pending
1572 * -1 A guest wakeup IPI (which has now been cleared)
1573 * In either case, we return to guest to deliver any pending
1574 * guest interrupts.
1575 *
1576 * -2 A PCI passthrough external interrupt was handled
1577 * (interrupt was delivered directly to guest)
1578 * Return to guest to deliver any pending guest interrupts.
1579 */
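A compact C sketch of the branches that follow (the trap constant is a stand-in, not the kernel's definition):

enum guest_action { RETURN_TO_GUEST, EXIT_TO_HOST };

static enum guest_action handle_read_intr(int rc, int *trap)
{
	if (rc == 2) {			/* passthrough needing host completion */
		*trap = 0x5555;		/* stand-in for BOOK3S_INTERRUPT_HV_RM_HARD */
		return EXIT_TO_HOST;
	}
	if (rc > 0)			/* a host interrupt is pending */
		return EXIT_TO_HOST;
	return RETURN_TO_GUEST;		/* 0, -1 or -2: re-enter the guest
					 * (subject to the vcore entry/exit check) */
}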
1580
1581 cmpdi r3, 1
1582 ble 1f
1583
1584 /* Return code = 2 */
1585 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1586 stw r12, VCPU_TRAP(r9)
1587 b guest_exit_cont
1588
15891: /* Return code <= 1 */
1590 cmpdi r3, 0
1591 bgt guest_exit_cont
1592
1593 /* Return code <= 0 */
1594maybe_reenter_guest:
1595 ld r5, HSTATE_KVM_VCORE(r13)
1596 lwz r0, VCORE_ENTRY_EXIT(r5)
1597 cmpwi r0, 0x100
1598 mr r4, r9
1599 blt deliver_guest_interrupt
1600 b guest_exit_cont
1601
Paul Mackerras697d3892011-12-12 12:36:37 +00001602/*
1603 * Check whether an HDSI is an HPTE not found fault or something else.
1604 * If it is an HPTE not found fault that is due to the guest accessing
1605 * a page that they have mapped but which we have paged out, then
1606 * we continue on with the guest exit path. In all other cases,
1607 * reflect the HDSI to the guest as a DSI.
1608 */
1609kvmppc_hdsi:
1610 mfspr r4, SPRN_HDAR
1611 mfspr r6, SPRN_HDSISR
Paul Mackerras4cf302b2011-12-12 12:38:51 +00001612 /* HPTE not found fault or protection fault? */
1613 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00001614 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11001615 andi. r0, r11, MSR_DR /* data relocation enabled? */
1616 beq 3f
Paul Mackerras697d3892011-12-12 12:36:37 +00001617 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001618 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11001619 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
1620 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000016214: std r4, VCPU_FAULT_DAR(r9)
1622 stw r6, VCPU_FAULT_DSISR(r9)
1623
1624 /* Search the hash table. */
1625 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001626 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001627 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00001628 ld r9, HSTATE_KVM_VCPU(r13)
1629 ld r10, VCPU_PC(r9)
1630 ld r11, VCPU_MSR(r9)
1631 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1632 cmpdi r3, 0 /* retry the instruction */
1633 beq 6f
1634 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001635 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001636 cmpdi r3, -2 /* MMIO emulation; need instr word */
1637 beq 2f
1638
Paul Mackerrascf29b212015-10-27 16:10:20 +11001639 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00001640 ld r4, VCPU_FAULT_DAR(r9)
1641 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110016421: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00001643 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110016447: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00001645 mtspr SPRN_SRR0, r10
1646 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11001647 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11001648 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001649fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000016506: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001651 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001652 mtctr r7
1653 mtxer r8
1654 mr r4, r9
1655 b fast_guest_return
1656
16573: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1658 ld r5, KVM_VRMA_SLB_V(r5)
1659 b 4b
1660
1661 /* If this is for emulated MMIO, load the instruction word */
16622: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1663
1664 /* Set guest mode to 'jump over instruction' so if lwz faults
1665 * we'll just continue at the next IP. */
1666 li r0, KVM_GUEST_MODE_SKIP
1667 stb r0, HSTATE_IN_GUEST(r13)
1668
1669 /* Do the access with MSR:DR enabled */
1670 mfmsr r3
1671 ori r4, r3, MSR_DR /* Enable paging for data */
1672 mtmsrd r4
1673 lwz r8, 0(r10)
1674 mtmsrd r3
1675
1676 /* Store the result */
1677 stw r8, VCPU_LAST_INST(r9)
1678
1679 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001680 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00001681 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001682 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00001683
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001684/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00001685 * Similarly for an HISI, reflect it to the guest as an ISI unless
1686 * it is an HPTE not found fault for a page that we have paged out.
1687 */
1688kvmppc_hisi:
1689 andis. r0, r11, SRR1_ISI_NOPT@h
1690 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11001691 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1692 beq 3f
Paul Mackerras342d3db2011-12-12 12:38:05 +00001693 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001694 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11001695 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
1696 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000016974:
1698 /* Search the hash table. */
1699 mr r3, r9 /* vcpu pointer */
1700 mr r4, r10
1701 mr r6, r11
1702 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001703 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00001704 ld r9, HSTATE_KVM_VCPU(r13)
1705 ld r10, VCPU_PC(r9)
1706 ld r11, VCPU_MSR(r9)
1707 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1708 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001709 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001710 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001711 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00001712
Paul Mackerrascf29b212015-10-27 16:10:20 +11001713 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001714 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110017151: li r0, BOOK3S_INTERRUPT_INST_STORAGE
17167: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00001717 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11001718 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11001719 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001720 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001721
17223: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1723 ld r5, KVM_VRMA_SLB_V(r6)
1724 b 4b
1725
1726/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001727 * Try to handle an hcall in real mode.
1728 * Returns to the guest if we handle it, or continues on up to
1729 * the kernel if we can't (i.e. if we don't have a handler for
1730 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001731 *
1732 * r5 - r8 contain hcall args,
1733 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001734 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001735hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00001736 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001737 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08001738 /* sc 1 from userspace - reflect to guest syscall */
1739 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001740 clrrdi r3,r3,2
1741 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001742 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10001743 /* See if this hcall is enabled for in-kernel handling */
1744 ld r4, VCPU_KVM(r9)
1745 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1746 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1747 add r4, r4, r0
1748 ld r0, KVM_ENABLED_HCALLS(r4)
1749 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
1750 srd r0, r0, r4
1751 andi. r0, r0, 1
1752 beq guest_exit_cont
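The shift arithmetic above indexes a bitmap with one bit per possible hcall; a C sketch of the test (function and parameter names are illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool hcall_enabled(const uint64_t *enabled_hcalls, unsigned long hcall_nr)
{
	unsigned long bit = hcall_nr / 4;	/* hcall numbers are multiples of 4 */
	return (enabled_hcalls[bit / 64] >> (bit % 64)) & 1;
}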
1753 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001754 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10001755 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001756 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001757 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10001758 add r12,r3,r4
1759 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001760 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001761 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001762 bctrl
1763 cmpdi r3,H_TOO_HARD
1764 beq hcall_real_fallback
1765 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001766 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001767 ld r10,VCPU_PC(r4)
1768 ld r11,VCPU_MSR(r4)
1769 b fast_guest_return
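Overall the dispatch works like the sketch below: hcall_real_table holds 32-bit offsets relative to the table itself, one slot per hcall number / 4, and an empty slot (or a handler returning H_TOO_HARD) sends the call up to the host. Types and constants here are stand-ins, not the kernel's.

#include <stddef.h>
#include <stdint.h>

#define H_TOO_HARD_SKETCH 9999		/* stand-in for the real H_TOO_HARD value */

typedef long (*real_mode_hcall_t)(void *vcpu);

static long try_real_mode_hcall(const int32_t *table, size_t nentries,
				unsigned long hcall_nr, void *vcpu)
{
	int32_t off;

	if (hcall_nr / 4 >= nentries)
		return H_TOO_HARD_SKETCH;	/* out of range: let the host handle it */
	off = table[hcall_nr / 4];
	if (!off)
		return H_TOO_HARD_SKETCH;	/* no real-mode handler registered */
	/* the entry is an offset from the table base, like the lwax/add/mtctr above */
	return ((real_mode_hcall_t)((const char *)table + off))(vcpu);
}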
1770
Liu Ping Fan27025a62013-11-19 14:12:48 +08001771sc_1_fast_return:
1772 mtspr SPRN_SRR0,r10
1773 mtspr SPRN_SRR1,r11
1774 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11001775 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08001776 mr r4,r9
1777 b fast_guest_return
1778
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001779 /* We've attempted a real mode hcall, but the handler has punted it back
1780 * to userspace. We need to restore some clobbered volatiles
1781 * before resuming the pass-it-to-qemu path */
1782hcall_real_fallback:
1783 li r12,BOOK3S_INTERRUPT_SYSCALL
1784 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001785
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001786 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001787
1788 .globl hcall_real_table
1789hcall_real_table:
1790 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001791 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
1792 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
1793 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10001794 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
1795 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001796 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
Jordan Niethee40542a2019-02-21 14:28:48 +11001797#ifdef CONFIG_SPAPR_TCE_IOMMU
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001798 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11001799 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Jordan Niethee40542a2019-02-21 14:28:48 +11001800#else
1801 .long 0 /* 0x1c */
1802 .long 0 /* 0x20 */
1803#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001804 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001805 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Suraj Jitindar Singheadfb1c2019-03-22 17:05:45 +11001806 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001807 .long 0 /* 0x30 */
1808 .long 0 /* 0x34 */
1809 .long 0 /* 0x38 */
1810 .long 0 /* 0x3c */
1811 .long 0 /* 0x40 */
1812 .long 0 /* 0x44 */
1813 .long 0 /* 0x48 */
1814 .long 0 /* 0x4c */
1815 .long 0 /* 0x50 */
1816 .long 0 /* 0x54 */
1817 .long 0 /* 0x58 */
1818 .long 0 /* 0x5c */
1819 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001820#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001821 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1822 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1823 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001824 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001825 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001826#else
1827 .long 0 /* 0x64 - H_EOI */
1828 .long 0 /* 0x68 - H_CPPR */
1829 .long 0 /* 0x6c - H_IPI */
1830 .long 0 /* 0x70 - H_IPOLL */
1831 .long 0 /* 0x74 - H_XIRR */
1832#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001833 .long 0 /* 0x78 */
1834 .long 0 /* 0x7c */
1835 .long 0 /* 0x80 */
1836 .long 0 /* 0x84 */
1837 .long 0 /* 0x88 */
1838 .long 0 /* 0x8c */
1839 .long 0 /* 0x90 */
1840 .long 0 /* 0x94 */
1841 .long 0 /* 0x98 */
1842 .long 0 /* 0x9c */
1843 .long 0 /* 0xa0 */
1844 .long 0 /* 0xa4 */
1845 .long 0 /* 0xa8 */
1846 .long 0 /* 0xac */
1847 .long 0 /* 0xb0 */
1848 .long 0 /* 0xb4 */
1849 .long 0 /* 0xb8 */
1850 .long 0 /* 0xbc */
1851 .long 0 /* 0xc0 */
1852 .long 0 /* 0xc4 */
1853 .long 0 /* 0xc8 */
1854 .long 0 /* 0xcc */
1855 .long 0 /* 0xd0 */
1856 .long 0 /* 0xd4 */
1857 .long 0 /* 0xd8 */
1858 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001859 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11001860 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001861 .long 0 /* 0xe8 */
1862 .long 0 /* 0xec */
1863 .long 0 /* 0xf0 */
1864 .long 0 /* 0xf4 */
1865 .long 0 /* 0xf8 */
1866 .long 0 /* 0xfc */
1867 .long 0 /* 0x100 */
1868 .long 0 /* 0x104 */
1869 .long 0 /* 0x108 */
1870 .long 0 /* 0x10c */
1871 .long 0 /* 0x110 */
1872 .long 0 /* 0x114 */
1873 .long 0 /* 0x118 */
1874 .long 0 /* 0x11c */
1875 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001876 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11001877 .long 0 /* 0x128 */
1878 .long 0 /* 0x12c */
1879 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001880 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Jordan Niethee40542a2019-02-21 14:28:48 +11001881#ifdef CONFIG_SPAPR_TCE_IOMMU
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11001882 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11001883 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Jordan Niethee40542a2019-02-21 14:28:48 +11001884#else
1885 .long 0 /* 0x138 */
1886 .long 0 /* 0x13c */
1887#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11001888 .long 0 /* 0x140 */
1889 .long 0 /* 0x144 */
1890 .long 0 /* 0x148 */
1891 .long 0 /* 0x14c */
1892 .long 0 /* 0x150 */
1893 .long 0 /* 0x154 */
1894 .long 0 /* 0x158 */
1895 .long 0 /* 0x15c */
1896 .long 0 /* 0x160 */
1897 .long 0 /* 0x164 */
1898 .long 0 /* 0x168 */
1899 .long 0 /* 0x16c */
1900 .long 0 /* 0x170 */
1901 .long 0 /* 0x174 */
1902 .long 0 /* 0x178 */
1903 .long 0 /* 0x17c */
1904 .long 0 /* 0x180 */
1905 .long 0 /* 0x184 */
1906 .long 0 /* 0x188 */
1907 .long 0 /* 0x18c */
1908 .long 0 /* 0x190 */
1909 .long 0 /* 0x194 */
1910 .long 0 /* 0x198 */
1911 .long 0 /* 0x19c */
1912 .long 0 /* 0x1a0 */
1913 .long 0 /* 0x1a4 */
1914 .long 0 /* 0x1a8 */
1915 .long 0 /* 0x1ac */
1916 .long 0 /* 0x1b0 */
1917 .long 0 /* 0x1b4 */
1918 .long 0 /* 0x1b8 */
1919 .long 0 /* 0x1bc */
1920 .long 0 /* 0x1c0 */
1921 .long 0 /* 0x1c4 */
1922 .long 0 /* 0x1c8 */
1923 .long 0 /* 0x1cc */
1924 .long 0 /* 0x1d0 */
1925 .long 0 /* 0x1d4 */
1926 .long 0 /* 0x1d8 */
1927 .long 0 /* 0x1dc */
1928 .long 0 /* 0x1e0 */
1929 .long 0 /* 0x1e4 */
1930 .long 0 /* 0x1e8 */
1931 .long 0 /* 0x1ec */
1932 .long 0 /* 0x1f0 */
1933 .long 0 /* 0x1f4 */
1934 .long 0 /* 0x1f8 */
1935 .long 0 /* 0x1fc */
1936 .long 0 /* 0x200 */
1937 .long 0 /* 0x204 */
1938 .long 0 /* 0x208 */
1939 .long 0 /* 0x20c */
1940 .long 0 /* 0x210 */
1941 .long 0 /* 0x214 */
1942 .long 0 /* 0x218 */
1943 .long 0 /* 0x21c */
1944 .long 0 /* 0x220 */
1945 .long 0 /* 0x224 */
1946 .long 0 /* 0x228 */
1947 .long 0 /* 0x22c */
1948 .long 0 /* 0x230 */
1949 .long 0 /* 0x234 */
1950 .long 0 /* 0x238 */
1951 .long 0 /* 0x23c */
1952 .long 0 /* 0x240 */
1953 .long 0 /* 0x244 */
1954 .long 0 /* 0x248 */
1955 .long 0 /* 0x24c */
1956 .long 0 /* 0x250 */
1957 .long 0 /* 0x254 */
1958 .long 0 /* 0x258 */
1959 .long 0 /* 0x25c */
1960 .long 0 /* 0x260 */
1961 .long 0 /* 0x264 */
1962 .long 0 /* 0x268 */
1963 .long 0 /* 0x26c */
1964 .long 0 /* 0x270 */
1965 .long 0 /* 0x274 */
1966 .long 0 /* 0x278 */
1967 .long 0 /* 0x27c */
1968 .long 0 /* 0x280 */
1969 .long 0 /* 0x284 */
1970 .long 0 /* 0x288 */
1971 .long 0 /* 0x28c */
1972 .long 0 /* 0x290 */
1973 .long 0 /* 0x294 */
1974 .long 0 /* 0x298 */
1975 .long 0 /* 0x29c */
1976 .long 0 /* 0x2a0 */
1977 .long 0 /* 0x2a4 */
1978 .long 0 /* 0x2a8 */
1979 .long 0 /* 0x2ac */
1980 .long 0 /* 0x2b0 */
1981 .long 0 /* 0x2b4 */
1982 .long 0 /* 0x2b8 */
1983 .long 0 /* 0x2bc */
1984 .long 0 /* 0x2c0 */
1985 .long 0 /* 0x2c4 */
1986 .long 0 /* 0x2c8 */
1987 .long 0 /* 0x2cc */
1988 .long 0 /* 0x2d0 */
1989 .long 0 /* 0x2d4 */
1990 .long 0 /* 0x2d8 */
1991 .long 0 /* 0x2dc */
1992 .long 0 /* 0x2e0 */
1993 .long 0 /* 0x2e4 */
1994 .long 0 /* 0x2e8 */
1995 .long 0 /* 0x2ec */
1996 .long 0 /* 0x2f0 */
1997 .long 0 /* 0x2f4 */
1998 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001999#ifdef CONFIG_KVM_XICS
2000 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2001#else
2002 .long 0 /* 0x2fc - H_XIRR_X*/
2003#endif
Nicholas Piggindcbac732021-05-28 19:07:44 +10002004 .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002005 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002006hcall_real_table_end:
2007
Michael Ellermandae58182021-09-24 01:10:31 +10002008_GLOBAL_TOC(kvmppc_h_set_xdabr)
Paul Mackerras4bad7772018-10-08 16:31:06 +11002009EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002010 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2011 beq 6f
2012 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2013 andc. r0, r5, r0
2014 beq 3f
20156: li r3, H_PARAMETER
2016 blr
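The two tests above reject a DABRX that selects no privilege mode or sets bits other than user/kernel/BTI; a sketch with illustrative bit values (the real DABRX_* masks live in the kernel headers):

#define SK_DABRX_USER	0x1UL	/* illustrative values only */
#define SK_DABRX_KERNEL	0x2UL
#define SK_DABRX_BTI	0x4UL
#define SK_H_PARAMETER	(-4L)	/* stand-in for H_PARAMETER */

static long validate_dabrx(unsigned long dabrx)
{
	if (!(dabrx & (SK_DABRX_USER | SK_DABRX_KERNEL)))
		return SK_H_PARAMETER;	/* must match at least one privilege mode */
	if (dabrx & ~(SK_DABRX_USER | SK_DABRX_KERNEL | SK_DABRX_BTI))
		return SK_H_PARAMETER;	/* no other bits are accepted */
	return 0;
}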
2017
Michael Ellermandae58182021-09-24 01:10:31 +10002018_GLOBAL_TOC(kvmppc_h_set_dabr)
Paul Mackerras4bad7772018-10-08 16:31:06 +11002019EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002020 li r5, DABRX_USER | DABRX_KERNEL
20213:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002022BEGIN_FTR_SECTION
2023 b 2f
2024END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002025 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002026 stw r5, VCPU_DABRX(r3)
2027 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002028 /* Work around P7 bug where DABR can get corrupted on mtspr */
20291: mtspr SPRN_DABR,r4
2030 mfspr r5, SPRN_DABR
2031 cmpd r4, r5
2032 bne 1b
2033 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002034 li r3,0
2035 blr
2036
Michael Neulinge8ebedb2018-03-27 15:37:21 +110020372:
Michael Neulingc1fe1902019-04-01 17:03:12 +11002038 LOAD_REG_ADDR(r11, dawr_force_enable)
2039 lbz r11, 0(r11)
2040 cmpdi r11, 0
Michael Neulingfabb2ef2019-06-17 17:16:18 +10002041 bne 3f
Aneesh Kumar K.Vca9a16c2018-03-30 17:27:24 +05302042 li r3, H_HARDWARE
Michael Neulingfabb2ef2019-06-17 17:16:18 +10002043 blr
20443:
Paul Mackerras8563bf52014-01-08 21:25:29 +11002045 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002046 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002047 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002048 clrrdi r4, r4, 3
Ravi Bangoria122954ed72020-12-16 16:12:17 +05302049 std r4, VCPU_DAWR0(r3)
2050 std r5, VCPU_DAWRX0(r3)
Suraj Jitindar Singh84b02822019-06-17 17:16:19 +10002051 /*
2052 * If came in through the real mode hcall handler then it is necessary
2053 * to write the registers since the return path won't. Otherwise it is
2054 * sufficient to store them in the vcpu struct as they will be loaded
2055 * next time the vcpu is run.
2056 */
2057 mfmsr r6
2058 andi. r6, r6, MSR_DR /* in real mode? */
2059 bne 4f
Ravi Bangoria09f82b02020-05-14 16:47:26 +05302060 mtspr SPRN_DAWR0, r4
2061 mtspr SPRN_DAWRX0, r5
Suraj Jitindar Singh84b02822019-06-17 17:16:19 +100020624: li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002063 blr
2064
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002065_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002066 ori r11,r11,MSR_EE
2067 std r11,VCPU_MSR(r3)
2068 li r0,1
2069 stb r0,VCPU_CEDED(r3)
2070 sync /* order setting ceded vs. testing prodded */
2071 lbz r5,VCPU_PRODDED(r3)
2072 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002073 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002074 li r12,0 /* set trap to 0 to say hcall is handled */
2075 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002076 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002077 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002078
2079 /*
2080 * Set our bit in the bitmask of napping threads unless all the
2081 * other threads are already napping, in which case we send this
2082 * up to the host.
2083 */
2084 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002085 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002086 lwz r8,VCORE_ENTRY_EXIT(r5)
2087 clrldi r8,r8,56
2088 li r0,1
2089 sld r0,r0,r6
2090 addi r6,r5,VCORE_NAPPING_THREADS
209131: lwarx r4,0,r6
2092 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002093 cmpw r4,r8
2094 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002095 stwcx. r4,0,r6
2096 bne 31b
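The lwarx/stwcx. loop above is the usual load-reserve/store-conditional retry pattern; roughly equivalent to the compare-and-swap loop below (the comparison against the entry/exit word that decides whether to give up and exit is left to the caller):

#include <stdatomic.h>
#include <stdint.h>

static uint32_t napping_set_bit(_Atomic uint32_t *napping_threads, unsigned int thread)
{
	uint32_t old = atomic_load(napping_threads), new;

	do {
		new = old | (1u << thread);	/* or in our thread's bit */
	} while (!atomic_compare_exchange_weak(napping_threads, &old, new));
	return new;	/* caller decides whether it must exit instead of napping */
}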
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002097 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002098 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002099 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002100 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002101 lwz r7,VCORE_ENTRY_EXIT(r5)
2102 cmpwi r7,0x100
2103 bge 33f /* another thread already exiting */
2104
2105/*
2106 * Although not specifically required by the architecture, POWER7
2107 * preserves the following registers in nap mode, even if an SMT mode
2108 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2109 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2110 */
2111 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002112 std r14, VCPU_GPR(R14)(r3)
2113 std r15, VCPU_GPR(R15)(r3)
2114 std r16, VCPU_GPR(R16)(r3)
2115 std r17, VCPU_GPR(R17)(r3)
2116 std r18, VCPU_GPR(R18)(r3)
2117 std r19, VCPU_GPR(R19)(r3)
2118 std r20, VCPU_GPR(R20)(r3)
2119 std r21, VCPU_GPR(R21)(r3)
2120 std r22, VCPU_GPR(R22)(r3)
2121 std r23, VCPU_GPR(R23)(r3)
2122 std r24, VCPU_GPR(R24)(r3)
2123 std r25, VCPU_GPR(R25)(r3)
2124 std r26, VCPU_GPR(R26)(r3)
2125 std r27, VCPU_GPR(R27)(r3)
2126 std r28, VCPU_GPR(R28)(r3)
2127 std r29, VCPU_GPR(R29)(r3)
2128 std r30, VCPU_GPR(R30)(r3)
2129 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002130
2131 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002132 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002133
Paul Mackerras93d17392016-06-22 15:52:55 +10002134#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2135BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002136 b 91f
Nicholas Pigginfae5c9f2021-05-28 19:07:52 +10002137END_FTR_SECTION_IFCLR(CPU_FTR_TM)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002138 /*
Paul Mackerras7854f752018-10-08 16:30:53 +11002139 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002140 */
Simon Guo6f597c62018-05-23 15:01:48 +08002141 ld r3, HSTATE_KVM_VCPU(r13)
2142 ld r4, VCPU_MSR(r3)
Paul Mackerras7854f752018-10-08 16:30:53 +11002143 li r5, 0 /* don't preserve non-vol regs */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002144 bl kvmppc_save_tm_hv
Paul Mackerras7854f752018-10-08 16:30:53 +11002145 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100214691:
Paul Mackerras93d17392016-06-22 15:52:55 +10002147#endif
2148
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002149 /*
2150 * Set DEC to the smaller of DEC and HDEC, so that we wake
2151 * no later than the end of our timeslice (HDEC interrupts
2152 * don't wake us from nap).
2153 */
2154 mfspr r3, SPRN_DEC
2155 mfspr r4, SPRN_HDEC
2156 mftb r5
Paul Mackerras2f272462017-05-22 16:25:14 +10002157 extsw r3, r3
Nicholas Pigginfae5c9f2021-05-28 19:07:52 +10002158 extsw r4, r4
Paul Mackerras2f272462017-05-22 16:25:14 +10002159 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002160 ble 67f
2161 mtspr SPRN_DEC, r4
216267:
2163 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002164 add r3, r3, r5
2165 ld r4, HSTATE_KVM_VCPU(r13)
2166 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002167 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002168 subf r3, r6, r3 /* convert to host TB value */
2169 std r3, VCPU_DEC_EXPIRES(r4)
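Both directions of the decrementer bookkeeping (here and again in kvm_end_cede below) amount to shifting the expiry time between guest and host timebase units; a C sketch with illustrative names:

#include <stdint.h>

/* at cede time: remember when the guest DEC would fire, in host TB units */
static int64_t dec_expiry_host_tb(int64_t dec, uint64_t now_guest_tb, uint64_t tb_offset)
{
	return (int64_t)(now_guest_tb + (uint64_t)dec) - (int64_t)tb_offset;
}

/* at wakeup: reload DEC from the saved expiry */
static int64_t dec_from_expiry(int64_t expiry_host_tb, uint64_t now_guest_tb, uint64_t tb_offset)
{
	return expiry_host_tb + (int64_t)tb_offset - (int64_t)now_guest_tb;
}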
2170
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002171#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2172 ld r4, HSTATE_KVM_VCPU(r13)
2173 addi r3, r4, VCPU_TB_CEDE
2174 bl kvmhv_accumulate_time
2175#endif
2176
Paul Mackerrasccc07772015-03-28 14:21:07 +11002177 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2178
Nicholas Piggin10d91612019-04-13 00:30:52 +10002179 /* Go back to host stack */
2180 ld r1, HSTATE_HOST_R1(r13)
2181
Paul Mackerras19ccb762011-07-23 17:42:46 +10002182 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002183 * Take a nap until a decrementer or external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002184 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002185 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002186 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002187 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002188kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002189 mfspr r0, SPRN_CTRLF
2190 clrrdi r0, r0, 1
2191 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302192
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002193 li r0,1
2194 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002195 mfspr r5,SPRN_LPCR
2196 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002197BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002198 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002199 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002200END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002201
2202kvm_nap_sequence: /* desired LPCR value in r5 */
Nicholas Piggin10d91612019-04-13 00:30:52 +10002203 li r3, PNV_THREAD_NAP
Paul Mackerras19ccb762011-07-23 17:42:46 +10002204 mtspr SPRN_LPCR,r5
2205 isync
Nicholas Piggin10d91612019-04-13 00:30:52 +10002206
Nicholas Piggin10d91612019-04-13 00:30:52 +10002207 bl isa206_idle_insn_mayloss
Nicholas Piggin10d91612019-04-13 00:30:52 +10002208
2209 mfspr r0, SPRN_CTRLF
2210 ori r0, r0, 1
2211 mtspr SPRN_CTRLT, r0
2212
2213 mtspr SPRN_SRR1, r3
2214
2215 li r0, 0
2216 stb r0, PACA_FTRACE_ENABLED(r13)
2217
2218 li r0, KVM_HWTHREAD_IN_KVM
2219 stb r0, HSTATE_HWTHREAD_STATE(r13)
2220
2221 lbz r0, HSTATE_NAPPING(r13)
2222 cmpwi r0, NAPPING_CEDE
2223 beq kvm_end_cede
2224 cmpwi r0, NAPPING_NOVCPU
2225 beq kvm_novcpu_wakeup
2226 cmpwi r0, NAPPING_UNSPLIT
2227 beq kvm_unsplit_wakeup
2228 twi 31,0,0 /* Nap state must not be zero */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002229
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100223033: mr r4, r3
2231 li r3, 0
2232 li r12, 0
2233 b 34f
2234
Paul Mackerras19ccb762011-07-23 17:42:46 +10002235kvm_end_cede:
Nicholas Piggin10d91612019-04-13 00:30:52 +10002236 /* Woken by external or decrementer interrupt */
2237
Paul Mackerras4619ac82013-04-17 20:31:41 +00002238 /* get vcpu pointer */
2239 ld r4, HSTATE_KVM_VCPU(r13)
2240
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002241#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2242 addi r3, r4, VCPU_TB_RMINTR
2243 bl kvmhv_accumulate_time
2244#endif
2245
Paul Mackerras93d17392016-06-22 15:52:55 +10002246#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2247BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002248 b 91f
Nicholas Pigginfae5c9f2021-05-28 19:07:52 +10002249END_FTR_SECTION_IFCLR(CPU_FTR_TM)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002250 /*
Paul Mackerras7854f752018-10-08 16:30:53 +11002251 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002252 */
Simon Guo6f597c62018-05-23 15:01:48 +08002253 mr r3, r4
2254 ld r4, VCPU_MSR(r3)
Paul Mackerras7854f752018-10-08 16:30:53 +11002255 li r5, 0 /* don't preserve non-vol regs */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002256 bl kvmppc_restore_tm_hv
Paul Mackerras7854f752018-10-08 16:30:53 +11002257 nop
Simon Guo6f597c62018-05-23 15:01:48 +08002258 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100225991:
Paul Mackerras93d17392016-06-22 15:52:55 +10002260#endif
2261
Paul Mackerras19ccb762011-07-23 17:42:46 +10002262 /* load up FP state */
2263 bl kvmppc_load_fp
2264
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002265 /* Restore guest decrementer */
2266 ld r3, VCPU_DEC_EXPIRES(r4)
2267 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002268 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002269 add r3, r3, r6 /* convert host TB to guest TB value */
2270 mftb r7
2271 subf r3, r7, r3
2272 mtspr SPRN_DEC, r3
2273
Paul Mackerras19ccb762011-07-23 17:42:46 +10002274 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002275 ld r14, VCPU_GPR(R14)(r4)
2276 ld r15, VCPU_GPR(R15)(r4)
2277 ld r16, VCPU_GPR(R16)(r4)
2278 ld r17, VCPU_GPR(R17)(r4)
2279 ld r18, VCPU_GPR(R18)(r4)
2280 ld r19, VCPU_GPR(R19)(r4)
2281 ld r20, VCPU_GPR(R20)(r4)
2282 ld r21, VCPU_GPR(R21)(r4)
2283 ld r22, VCPU_GPR(R22)(r4)
2284 ld r23, VCPU_GPR(R23)(r4)
2285 ld r24, VCPU_GPR(R24)(r4)
2286 ld r25, VCPU_GPR(R25)(r4)
2287 ld r26, VCPU_GPR(R26)(r4)
2288 ld r27, VCPU_GPR(R27)(r4)
2289 ld r28, VCPU_GPR(R28)(r4)
2290 ld r29, VCPU_GPR(R29)(r4)
2291 ld r30, VCPU_GPR(R30)(r4)
2292 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002293
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002294 /* Check the wake reason in SRR1 to see why we got here */
2295 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002296
Suresh Warrier37f55d32016-08-19 15:35:46 +10002297 /*
2298 * Restore volatile registers since we could have called a
2299 * C routine in kvmppc_check_wake_reason
2300 * r4 = VCPU
2301 * r3 tells us whether we need to return to host or not
2302 * WARNING: r3 is checked further down;
2303 * do not modify it until that check is done.
2304 */
2305 ld r4, HSTATE_KVM_VCPU(r13)
2306
Paul Mackerras19ccb762011-07-23 17:42:46 +10002307 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100230834: ld r5,HSTATE_KVM_VCORE(r13)
2309 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002310 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002311 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002312 addi r6,r5,VCORE_NAPPING_THREADS
231332: lwarx r7,0,r6
2314 andc r7,r7,r0
2315 stwcx. r7,0,r6
2316 bne 32b
2317 li r0,0
2318 stb r0,HSTATE_NAPPING(r13)
2319
Suresh Warrier37f55d32016-08-19 15:35:46 +10002320 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002321 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002322 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002323 cmpdi r3, 0
2324 bgt guest_exit_cont
Paul Mackerrasdf709a22018-10-08 16:30:52 +11002325 b maybe_reenter_guest
Paul Mackerras19ccb762011-07-23 17:42:46 +10002326
2327 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002328kvm_cede_prodded:
2329 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002330 stb r0,VCPU_PRODDED(r3)
2331 sync /* order testing prodded vs. clearing ceded */
2332 stb r0,VCPU_CEDED(r3)
2333 li r3,H_SUCCESS
2334 blr
2335
2336 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002337kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002338 ld r9, HSTATE_KVM_VCPU(r13)
Nicholas Pigginfae5c9f2021-05-28 19:07:52 +10002339 b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002340
Paul Mackerras884dfb72019-02-21 13:38:49 +11002341 /* Try to do machine check recovery in real mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002342machine_check_realmode:
2343 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002344 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002345 nop
Paul Mackerras884dfb72019-02-21 13:38:49 +11002346 /* all machine checks go to virtual mode for further handling */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002347 ld r9, HSTATE_KVM_VCPU(r13)
2348 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Paul Mackerras884dfb72019-02-21 13:38:49 +11002349 b guest_exit_cont
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002350
Paul Mackerrasde56a942011-06-29 00:21:34 +00002351/*
Paul Mackerrasdf709a22018-10-08 16:30:52 +11002352 * Call C code to handle an HMI in real mode.
2353 * Only the primary thread does the call, secondary threads are handled
2354 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
2355 * r9 points to the vcpu on entry
2356 */
2357hmi_realmode:
2358 lbz r0, HSTATE_PTID(r13)
2359 cmpwi r0, 0
2360 bne guest_exit_cont
2361 bl kvmppc_realmode_hmi_handler
2362 ld r9, HSTATE_KVM_VCPU(r13)
2363 li r12, BOOK3S_INTERRUPT_HMI
2364 b guest_exit_cont
2365
2366/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002367 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002368 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002369 * 0 if nothing needs to be done
2370 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11002371 * -1 if there was a guest wakeup (IPI or msgsnd)
Suresh Warriere3c13e52016-08-19 15:35:51 +10002372 * -2 if we handled a PCI passthrough interrupt (returned by
2373 * kvmppc_read_intr only)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002374 *
2375 * Also sets r12 to the interrupt vector for any interrupt that needs
2376 * to be handled now by the host (0x500 for external interrupt), or zero.
Suresh Warrier37f55d32016-08-19 15:35:46 +10002377 * Modifies all volatile registers (since it may call a C function).
2378 * This routine calls kvmppc_read_intr, a C function, if an external
2379 * interrupt is pending.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002380 */
2381kvmppc_check_wake_reason:
2382 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002383BEGIN_FTR_SECTION
2384 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2385FTR_SECTION_ELSE
2386 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2387ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2388 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10002389 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002390 li r3, 0
2391 li r12, 0
2392 cmpwi r6, 6 /* was it the decrementer? */
2393 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002394BEGIN_FTR_SECTION
2395 cmpwi r6, 5 /* privileged doorbell? */
2396 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002397 cmpwi r6, 3 /* hypervisor doorbell? */
2398 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002399END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302400 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2401 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002402 li r3, 1 /* anything else, return 1 */
24030: blr
2404
Paul Mackerras5d00f662014-01-08 21:25:28 +11002405 /* hypervisor doorbell */
24063: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302407
2408 /*
2409 * Clear the doorbell as we will invoke the handler
2410 * explicitly in the guest exit path.
2411 */
2412 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2413 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11002414 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11002415 li r3, 1
Paul Mackerras66feed62015-03-28 14:21:12 +11002416 lbz r0, HSTATE_HOST_IPI(r13)
2417 cmpwi r0, 0
2418 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302419 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11002420 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11002421 blr
2422
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302423 /* Woken up due to Hypervisor maintenance interrupt */
24244: li r12, BOOK3S_INTERRUPT_HMI
2425 li r3, 1
2426 blr
2427
Suresh Warrier37f55d32016-08-19 15:35:46 +10002428 /* external interrupt - create a stack frame so we can call C */
24297: mflr r0
2430 std r0, PPC_LR_STKOFF(r1)
2431 stdu r1, -PPC_MIN_STKFRM(r1)
2432 bl kvmppc_read_intr
2433 nop
2434 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10002435 cmpdi r3, 1
2436 ble 1f
2437
2438 /*
2439 * Return code of 2 means PCI passthrough interrupt, but
2440 * we need to return to the host to complete handling the
2441 * interrupt. Trap reason is expected in r12 by guest
2442 * exit code.
2443 */
2444 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
24451:
Suresh Warrier37f55d32016-08-19 15:35:46 +10002446 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2447 addi r1, r1, PPC_MIN_STKFRM
2448 mtlr r0
2449 blr
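Summarising the dispatch above as a C sketch (the reason codes are the values the asm compares against; the return semantics follow the comment block at the top of the routine):

enum wake_action { WAKE_RETURN_TO_GUEST, WAKE_EXIT_TO_HOST, WAKE_CHECK_EXTERNAL };

static enum wake_action decode_wake_reason(unsigned int srr1_reason)
{
	switch (srr1_reason) {
	case 0x8: return WAKE_CHECK_EXTERNAL;	/* external: ask kvmppc_read_intr */
	case 0x6: return WAKE_RETURN_TO_GUEST;	/* decrementer */
	case 0x5: return WAKE_RETURN_TO_GUEST;	/* privileged doorbell (POWER8 only) */
	case 0x3: return WAKE_RETURN_TO_GUEST;	/* hv doorbell, unless it was a host IPI */
	case 0xa: return WAKE_EXIT_TO_HOST;	/* hypervisor maintenance interrupt */
	default:  return WAKE_EXIT_TO_HOST;	/* anything else goes to the host */
	}
}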
Paul Mackerrasde56a942011-06-29 00:21:34 +00002450
2451/*
2452 * Save away FP, VMX and VSX registers.
2453 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002454 * N.B. r30 and r31 are volatile across this function,
2455 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002456 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002457kvmppc_save_fp:
2458 mflr r30
2459 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00002460 mfmsr r5
2461 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00002462#ifdef CONFIG_ALTIVEC
2463BEGIN_FTR_SECTION
2464 oris r8,r8,MSR_VEC@h
2465END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2466#endif
2467#ifdef CONFIG_VSX
2468BEGIN_FTR_SECTION
2469 oris r8,r8,MSR_VSX@h
2470END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2471#endif
2472 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002473 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002474 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002475#ifdef CONFIG_ALTIVEC
2476BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002477 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002478 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002479END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2480#endif
2481 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11002482 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11002483 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00002484 blr
2485
2486/*
2487 * Load up FP, VMX and VSX registers
2488 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002489 * N.B. r30 and r31 are volatile across this function,
2490 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002491 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002492kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002493 mflr r30
2494 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002495 mfmsr r9
2496 ori r8,r9,MSR_FP
2497#ifdef CONFIG_ALTIVEC
2498BEGIN_FTR_SECTION
2499 oris r8,r8,MSR_VEC@h
2500END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2501#endif
2502#ifdef CONFIG_VSX
2503BEGIN_FTR_SECTION
2504 oris r8,r8,MSR_VSX@h
2505END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2506#endif
2507 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002508 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002509 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002510#ifdef CONFIG_ALTIVEC
2511BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002512 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002513 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002514END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2515#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11002516 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002517 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002518 mtlr r30
2519 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002520 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10002521
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002522#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2523/*
2524 * Save transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08002525 * Called with r3 pointing to the vcpu struct and r4 containing
2526 * the guest MSR value.
Paul Mackerras7854f752018-10-08 16:30:53 +11002527 * r5 is non-zero iff non-volatile register state needs to be maintained.
2528 * If r5 == 0, this can modify all checkpointed registers, but
Simon Guo6f597c62018-05-23 15:01:48 +08002529 * restores r1 and r2 before exit.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002530 */
Paul Mackerras7854f752018-10-08 16:30:53 +11002531_GLOBAL_TOC(kvmppc_save_tm_hv)
2532EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002533 /* See if we need to handle fake suspend mode */
2534BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08002535 b __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002536END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
2537
2538 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
2539 cmpwi r0, 0
Simon Guocaa3be92018-05-23 15:01:50 +08002540 beq __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002541
2542 /* The following code handles the fake_suspend = 1 case */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002543 mflr r0
2544 std r0, PPC_LR_STKOFF(r1)
Nicholas Piggin267cdfa2021-09-08 20:17:18 +10002545 stdu r1, -TM_FRAME_SIZE(r1)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002546
2547 /* Turn on TM. */
2548 mfmsr r8
2549 li r0, 1
2550 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
2551 mtmsrd r8
2552
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11002553 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
2554 beq 4f
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002555BEGIN_FTR_SECTION
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11002556 bl pnv_power9_force_smt4_catch
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002557END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11002558 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002559
Nicholas Piggin267cdfa2021-09-08 20:17:18 +10002560 /*
2561 * It's possible that treclaim. may modify registers, if we have lost
2562 * track of fake-suspend state in the guest due to it using rfscv.
2563 * Save and restore registers in case this occurs.
2564 */
2565 mfspr r3, SPRN_DSCR
2566 mfspr r4, SPRN_XER
2567 mfspr r5, SPRN_AMR
2568 /* SPRN_TAR would need to be saved here if the kernel ever used it */
2569 mfcr r12
2570 SAVE_NVGPRS(r1)
2571 SAVE_GPR(2, r1)
2572 SAVE_GPR(3, r1)
2573 SAVE_GPR(4, r1)
2574 SAVE_GPR(5, r1)
2575 stw r12, 8(r1)
2576 std r1, HSTATE_HOST_R1(r13)
2577
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002578 /* We have to treclaim here because that's the only way to do S->N */
2579 li r3, TM_CAUSE_KVM_RESCHED
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002580 TRECLAIM(R3)
2581
Nicholas Piggin267cdfa2021-09-08 20:17:18 +10002582 GET_PACA(r13)
2583 ld r1, HSTATE_HOST_R1(r13)
2584 REST_GPR(2, r1)
2585 REST_GPR(3, r1)
2586 REST_GPR(4, r1)
2587 REST_GPR(5, r1)
2588 lwz r12, 8(r1)
2589 REST_NVGPRS(r1)
2590 mtspr SPRN_DSCR, r3
2591 mtspr SPRN_XER, r4
2592 mtspr SPRN_AMR, r5
2593 mtcr r12
2594 HMT_MEDIUM
2595
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002596 /*
2597 * We were in fake suspend, so we are not going to save the
2598 * register state as the guest checkpointed state (since
2599 * we already have it), therefore we can now use any volatile GPR.
Paul Mackerras7854f752018-10-08 16:30:53 +11002600 * In fact treclaim in fake suspend state doesn't modify
2601 * any registers.
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002602 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002603
Paul Mackerras7854f752018-10-08 16:30:53 +11002604BEGIN_FTR_SECTION
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11002605 bl pnv_power9_force_smt4_release
Paul Mackerras7854f752018-10-08 16:30:53 +11002606END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11002607 nop
2608
26094:
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002610 mfspr r3, SPRN_PSSCR
2611 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
2612 li r0, PSSCR_FAKE_SUSPEND
2613 andc r3, r3, r0
2614 mtspr SPRN_PSSCR, r3
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002615
Paul Mackerras681c6172018-03-21 21:32:03 +11002616 /* Don't save TEXASR, use value from last exit in real suspend state */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002617 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002618 mfspr r5, SPRN_TFHAR
2619 mfspr r6, SPRN_TFIAR
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002620 std r5, VCPU_TFHAR(r9)
2621 std r6, VCPU_TFIAR(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002622
Nicholas Piggin267cdfa2021-09-08 20:17:18 +10002623 addi r1, r1, TM_FRAME_SIZE
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002624 ld r0, PPC_LR_STKOFF(r1)
2625 mtlr r0
2626 blr
2627
2628/*
2629 * Restore transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08002630 * Called with r3 pointing to the vcpu struct
2631 * and r4 containing the guest MSR value.
Paul Mackerras7854f752018-10-08 16:30:53 +11002632 * r5 is non-zero iff non-volatile register state needs to be maintained.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002633 * This potentially modifies all checkpointed registers.
Simon Guo6f597c62018-05-23 15:01:48 +08002634 * It restores r1 and r2 from the PACA.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002635 */
Paul Mackerras7854f752018-10-08 16:30:53 +11002636_GLOBAL_TOC(kvmppc_restore_tm_hv)
2637EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002638 /*
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002639 * If we are doing TM emulation for the guest on a POWER9 DD2,
2640 * then we don't actually do a trechkpt -- we either set up
2641 * fake-suspend mode, or emulate a TM rollback.
2642 */
2643BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08002644 b __kvmppc_restore_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002645END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
2646 mflr r0
2647 std r0, PPC_LR_STKOFF(r1)
2648
2649 li r0, 0
2650 stb r0, HSTATE_FAKE_SUSPEND(r13)
2651
2652 /* Turn on TM so we can restore TM SPRs */
2653 mfmsr r5
2654 li r0, 1
2655 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
2656 mtmsrd r5
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002657
2658 /*
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002659 * The user may change these outside of a transaction, so they must
2660 * always be context switched.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002661 */
Simon Guo6f597c62018-05-23 15:01:48 +08002662 ld r5, VCPU_TFHAR(r3)
2663 ld r6, VCPU_TFIAR(r3)
2664 ld r7, VCPU_TEXASR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002665 mtspr SPRN_TFHAR, r5
2666 mtspr SPRN_TFIAR, r6
2667 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002668
Simon Guo6f597c62018-05-23 15:01:48 +08002669 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002670 beqlr /* TM not active in guest */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002671
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002672 /* Make sure the failure summary is set */
2673 oris r7, r7, (TEXASR_FS)@h
2674 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002675
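	/*
	 * r5 holds the guest's MSR[TS] field: 1 = suspended, 2 = transactional.
	 * For suspended we just record fake-suspend (no real trechkpt is done);
	 * for transactional we emulate an immediate rollback to the checkpoint.
	 */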
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002676 cmpwi r5, 1 /* check for suspended state */
2677 bgt 10f
2678 stb r5, HSTATE_FAKE_SUSPEND(r13)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002679 b 9f /* and return */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100268010: stdu r1, -PPC_MIN_STKFRM(r1)
2681 /* guest is in transactional state, so simulate rollback */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002682 bl kvmhv_emulate_tm_rollback
2683 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002684 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerras7b0e8272018-05-30 20:07:52 +100026859: ld r0, PPC_LR_STKOFF(r1)
2686 mtlr r0
2687 blr
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002688#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002689
Paul Mackerras44a3add2013-10-04 21:45:04 +10002690/*
2691 * We come here if we get any exception or interrupt while we are
2692 * executing host real mode code while in guest MMU context.
Paul Mackerras857b99e2017-09-01 16:17:27 +10002693 * r12 is (CR << 32) | vector
2694 * r13 points to our PACA
2695 * r12 is saved in HSTATE_SCRATCH0(r13)
Paul Mackerras857b99e2017-09-01 16:17:27 +10002696 * r9 is saved in HSTATE_SCRATCH2(r13)
2697 * r13 is saved in HSPRG1
2698 * cfar is saved in HSTATE_CFAR(r13)
2699 * ppr is saved in HSTATE_PPR(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10002700 */
2701kvmppc_bad_host_intr:
Paul Mackerras857b99e2017-09-01 16:17:27 +10002702 /*
2703 * Switch to the emergency stack, but start half-way down in
2704 * case we were already on it.
2705 */
2706 mr r9, r1
2707 std r1, PACAR1(r13)
2708 ld r1, PACAEMERGSP(r13)
2709 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
2710 std r9, 0(r1)
2711 std r0, GPR0(r1)
2712 std r9, GPR1(r1)
2713 std r2, GPR2(r1)
2714 SAVE_4GPRS(3, r1)
2715 SAVE_2GPRS(7, r1)
2716 srdi r0, r12, 32
2717 clrldi r12, r12, 32
2718 std r0, _CCR(r1)
2719 std r12, _TRAP(r1)
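	/*
	 * The 0x2 bit of the vector distinguishes interrupts delivered via
	 * the HSRR registers from those using SRR, so pick up the matching
	 * set of (H)SRR0/1 and (H)DAR/(H)DSISR below.
	 */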
2720 andi. r0, r12, 2
2721 beq 1f
2722 mfspr r3, SPRN_HSRR0
2723 mfspr r4, SPRN_HSRR1
2724 mfspr r5, SPRN_HDAR
2725 mfspr r6, SPRN_HDSISR
2726 b 2f
27271: mfspr r3, SPRN_SRR0
2728 mfspr r4, SPRN_SRR1
2729 mfspr r5, SPRN_DAR
2730 mfspr r6, SPRN_DSISR
27312: std r3, _NIP(r1)
2732 std r4, _MSR(r1)
2733 std r5, _DAR(r1)
2734 std r6, _DSISR(r1)
2735 ld r9, HSTATE_SCRATCH2(r13)
2736 ld r12, HSTATE_SCRATCH0(r13)
2737 GET_SCRATCH0(r0)
2738 SAVE_4GPRS(9, r1)
2739 std r0, GPR13(r1)
2740 SAVE_NVGPRS(r1)
2741 ld r5, HSTATE_CFAR(r13)
2742 std r5, ORIG_GPR3(r1)
2743 mflr r3
Paul Mackerras857b99e2017-09-01 16:17:27 +10002744 mfctr r4
Paul Mackerras857b99e2017-09-01 16:17:27 +10002745 mfxer r5
Madhavan Srinivasan4e26bc42017-12-20 09:25:50 +05302746 lbz r6, PACAIRQSOFTMASK(r13)
Paul Mackerras857b99e2017-09-01 16:17:27 +10002747 std r3, _LINK(r1)
2748 std r4, _CTR(r1)
2749 std r5, _XER(r1)
2750 std r6, SOFTE(r1)
2751 ld r2, PACATOC(r13)
2752 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
2753 std r3, STACK_FRAME_OVERHEAD-16(r1)
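	/*
	 * 0x7265677368657265 is ASCII "regshere", the marker used to flag
	 * that a full register frame has been saved on this stack.
	 */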
2754
2755 /*
Paul Mackerras857b99e2017-09-01 16:17:27 +10002756 * XXX On POWER7 and POWER8, we just spin here since we don't
2757 * know what the other threads are doing (and we don't want to
2758 * coordinate with them) - but at least we now have register state
2759 * in memory that we might be able to look at from another CPU.
2760 */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002761 b .
Michael Neulinge4e38122014-03-25 10:47:02 +11002762
2763/*
2764 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2765 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2766 * r11 has the guest MSR value (in/out)
2767 * r9 has a vcpu pointer (in)
2768 * r0 is used as a scratch register
2769 */
2770kvmppc_msr_interrupt:
2771 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2772 cmpwi r0, 2 /* Check if we are in transactional state.. */
2773 ld r11, VCPU_INTR_MSR(r9)
2774 bne 1f
2775 /* ... if transactional, change to suspended */
2776 li r0, 1
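	/*
	 * r0 now holds the TS value for the new MSR: the original value if
	 * the guest was non-transactional or already suspended, or 1
	 * (suspended) if it was transactional.
	 */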
27771: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2778 blr
Paul Mackerras9bc01a92014-05-26 19:48:40 +10002779
2780/*
Paul Mackerras41f4e632018-10-08 16:30:51 +11002781 * Load up guest PMU state. R3 points to the vcpu struct.
2782 */
2783_GLOBAL(kvmhv_load_guest_pmu)
2784EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
2785 mr r4, r3
2786 mflr r0
2787 li r3, 1
2788 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
2789 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
2790 isync
2791BEGIN_FTR_SECTION
2792 ld r3, VCPU_MMCR(r4)
2793 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
2794 cmpwi r5, MMCR0_PMAO
2795 beql kvmppc_fix_pmao
2796END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
2797 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
2798 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
2799 lwz r6, VCPU_PMC + 8(r4)
2800 lwz r7, VCPU_PMC + 12(r4)
2801 lwz r8, VCPU_PMC + 16(r4)
2802 lwz r9, VCPU_PMC + 20(r4)
2803 mtspr SPRN_PMC1, r3
2804 mtspr SPRN_PMC2, r5
2805 mtspr SPRN_PMC3, r6
2806 mtspr SPRN_PMC4, r7
2807 mtspr SPRN_PMC5, r8
2808 mtspr SPRN_PMC6, r9
2809 ld r3, VCPU_MMCR(r4)
2810 ld r5, VCPU_MMCR + 8(r4)
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002811 ld r6, VCPU_MMCRA(r4)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002812 ld r7, VCPU_SIAR(r4)
2813 ld r8, VCPU_SDAR(r4)
2814 mtspr SPRN_MMCR1, r5
2815 mtspr SPRN_MMCRA, r6
2816 mtspr SPRN_SIAR, r7
2817 mtspr SPRN_SDAR, r8
2818BEGIN_FTR_SECTION
Athira Rajeev5752fe02020-07-17 10:38:17 -04002819 ld r5, VCPU_MMCR + 24(r4)
2820 ld r6, VCPU_SIER + 8(r4)
2821 ld r7, VCPU_SIER + 16(r4)
2822 mtspr SPRN_MMCR3, r5
2823 mtspr SPRN_SIER2, r6
2824 mtspr SPRN_SIER3, r7
2825END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
2826BEGIN_FTR_SECTION
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002827 ld r5, VCPU_MMCR + 16(r4)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002828 ld r6, VCPU_SIER(r4)
2829 mtspr SPRN_MMCR2, r5
2830 mtspr SPRN_SIER, r6
2831BEGIN_FTR_SECTION_NESTED(96)
2832 lwz r7, VCPU_PMC + 24(r4)
2833 lwz r8, VCPU_PMC + 28(r4)
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002834 ld r9, VCPU_MMCRS(r4)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002835 mtspr SPRN_SPMC1, r7
2836 mtspr SPRN_SPMC2, r8
2837 mtspr SPRN_MMCRS, r9
2838END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
2839END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
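	/*
	 * Restore the guest's MMCR0 last so the counters stay frozen until
	 * all the other guest PMU registers have been loaded.
	 */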
2840 mtspr SPRN_MMCR0, r3
2841 isync
2842 mtlr r0
2843 blr
2844
2845/*
2846 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
2847 */
2848_GLOBAL(kvmhv_load_host_pmu)
2849EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
2850 mflr r0
2851 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
2852 cmpwi r4, 0
2853 beq 23f /* skip if not */
2854BEGIN_FTR_SECTION
2855 ld r3, HSTATE_MMCR0(r13)
2856 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
2857 cmpwi r4, MMCR0_PMAO
2858 beql kvmppc_fix_pmao
2859END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
2860 lwz r3, HSTATE_PMC1(r13)
2861 lwz r4, HSTATE_PMC2(r13)
2862 lwz r5, HSTATE_PMC3(r13)
2863 lwz r6, HSTATE_PMC4(r13)
2864 lwz r8, HSTATE_PMC5(r13)
2865 lwz r9, HSTATE_PMC6(r13)
2866 mtspr SPRN_PMC1, r3
2867 mtspr SPRN_PMC2, r4
2868 mtspr SPRN_PMC3, r5
2869 mtspr SPRN_PMC4, r6
2870 mtspr SPRN_PMC5, r8
2871 mtspr SPRN_PMC6, r9
2872 ld r3, HSTATE_MMCR0(r13)
2873 ld r4, HSTATE_MMCR1(r13)
2874 ld r5, HSTATE_MMCRA(r13)
2875 ld r6, HSTATE_SIAR(r13)
2876 ld r7, HSTATE_SDAR(r13)
2877 mtspr SPRN_MMCR1, r4
2878 mtspr SPRN_MMCRA, r5
2879 mtspr SPRN_SIAR, r6
2880 mtspr SPRN_SDAR, r7
2881BEGIN_FTR_SECTION
2882 ld r8, HSTATE_MMCR2(r13)
2883 ld r9, HSTATE_SIER(r13)
2884 mtspr SPRN_MMCR2, r8
2885 mtspr SPRN_SIER, r9
2886END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Athira Rajeev5752fe02020-07-17 10:38:17 -04002887BEGIN_FTR_SECTION
2888 ld r5, HSTATE_MMCR3(r13)
2889 ld r6, HSTATE_SIER2(r13)
2890 ld r7, HSTATE_SIER3(r13)
2891 mtspr SPRN_MMCR3, r5
2892 mtspr SPRN_SIER2, r6
2893 mtspr SPRN_SIER3, r7
2894END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
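	/*
	 * Write the host MMCR0 last so that, if the host had the PMU active,
	 * counting only resumes once the rest of the host PMU state is back.
	 */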
Paul Mackerras41f4e632018-10-08 16:30:51 +11002895 mtspr SPRN_MMCR0, r3
2896 isync
2897 mtlr r0
289823: blr
2899
2900/*
2901 * Save guest PMU state into the vcpu struct.
2902 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
2903 */
2904_GLOBAL(kvmhv_save_guest_pmu)
2905EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
2906 mr r9, r3
2907 mr r8, r4
2908BEGIN_FTR_SECTION
2909 /*
2910	 * POWER8 seems to have a hardware bug where setting
2911	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
2912	 * when some counters are already negative doesn't cause a
2913	 * performance monitor alert (and hence interrupt).
2914 * The effect of this is that when saving the PMU state,
2915 * if there is no PMU alert pending when we read MMCR0
2916 * before freezing the counters, but one becomes pending
2917 * before we read the counters, we lose it.
2918 * To work around this, we need a way to freeze the counters
2919 * before reading MMCR0. Normally, freezing the counters
2920 * is done by writing MMCR0 (to set MMCR0[FC]) which
2921	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
2922 * we can also freeze the counters using MMCR2, by writing
2923 * 1s to all the counter freeze condition bits (there are
2924 * 9 bits each for 6 counters).
2925 */
2926 li r3, -1 /* set all freeze bits */
2927 clrrdi r3, r3, 10
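	/*
	 * r3 now has 1s in the upper 54 bits (9 freeze-condition bits for
	 * each of the 6 counters) and 0s in the low 10 bits.
	 */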
2928 mfspr r10, SPRN_MMCR2
2929 mtspr SPRN_MMCR2, r3
2930 isync
2931END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2932 li r3, 1
2933 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
2934 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
2935 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
2936 mfspr r6, SPRN_MMCRA
2937 /* Clear MMCRA in order to disable SDAR updates */
2938 li r7, 0
2939 mtspr SPRN_MMCRA, r7
2940 isync
2941 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
2942 bne 21f
2943 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
2944 b 22f
294521: mfspr r5, SPRN_MMCR1
2946 mfspr r7, SPRN_SIAR
2947 mfspr r8, SPRN_SDAR
2948 std r4, VCPU_MMCR(r9)
2949 std r5, VCPU_MMCR + 8(r9)
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002950 std r6, VCPU_MMCRA(r9)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002951BEGIN_FTR_SECTION
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002952 std r10, VCPU_MMCR + 16(r9)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002953END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Athira Rajeev5752fe02020-07-17 10:38:17 -04002954BEGIN_FTR_SECTION
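	/*
	 * r7/r8 still hold SIAR/SDAR, which are stored after this section,
	 * so use r11 for SIER3 here to avoid clobbering the SIAR value.
	 */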
2955 mfspr r5, SPRN_MMCR3
2956 mfspr r6, SPRN_SIER2
2957	mfspr	r11, SPRN_SIER3
2958 std r5, VCPU_MMCR + 24(r9)
2959 std r6, VCPU_SIER + 8(r9)
2960	std	r11, VCPU_SIER + 16(r9)
2961END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
Paul Mackerras41f4e632018-10-08 16:30:51 +11002962 std r7, VCPU_SIAR(r9)
2963 std r8, VCPU_SDAR(r9)
2964 mfspr r3, SPRN_PMC1
2965 mfspr r4, SPRN_PMC2
2966 mfspr r5, SPRN_PMC3
2967 mfspr r6, SPRN_PMC4
2968 mfspr r7, SPRN_PMC5
2969 mfspr r8, SPRN_PMC6
2970 stw r3, VCPU_PMC(r9)
2971 stw r4, VCPU_PMC + 4(r9)
2972 stw r5, VCPU_PMC + 8(r9)
2973 stw r6, VCPU_PMC + 12(r9)
2974 stw r7, VCPU_PMC + 16(r9)
2975 stw r8, VCPU_PMC + 20(r9)
2976BEGIN_FTR_SECTION
2977 mfspr r5, SPRN_SIER
2978 std r5, VCPU_SIER(r9)
2979BEGIN_FTR_SECTION_NESTED(96)
2980 mfspr r6, SPRN_SPMC1
2981 mfspr r7, SPRN_SPMC2
2982 mfspr r8, SPRN_MMCRS
2983 stw r6, VCPU_PMC + 24(r9)
2984 stw r7, VCPU_PMC + 28(r9)
Athira Rajeev7e4a1452020-07-17 10:38:14 -04002985 std r8, VCPU_MMCRS(r9)
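	/*
	 * Assumption: 0x8000_0000 mirrors MMCR0[FC], so this leaves the
	 * subcore counters frozen now that their state has been saved.
	 */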
Paul Mackerras41f4e632018-10-08 16:30:51 +11002986 lis r4, 0x8000
2987 mtspr SPRN_MMCRS, r4
2988END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
2989END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
299022: blr
2991
2992/*
Paul Mackerras9bc01a92014-05-26 19:48:40 +10002993 * This works around a hardware bug on POWER8E processors, where
2994 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2995 * performance monitor interrupt. Instead, when we need to have
2996 * an interrupt pending, we have to arrange for a counter to overflow.
2997 */
2998kvmppc_fix_pmao:
2999 li r3, 0
3000 mtspr SPRN_MMCR2, r3
3001 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3002 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3003 mtspr SPRN_MMCR0, r3
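	/*
	 * Load PMC6 with 0x7fffffff so that the very next counted event
	 * overflows it and raises the performance monitor interrupt we need.
	 */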
3004 lis r3, 0x7fff
3005 ori r3, r3, 0xffff
3006 mtspr SPRN_PMC6, r3
3007 isync
3008 blr
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003009
3010#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3011/*
3012 * Start timing an activity
3013 * r3 = pointer to time accumulation struct, r4 = vcpu
3014 */
3015kvmhv_start_timing:
3016 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003017 ld r6, VCORE_TB_OFFSET_APPL(r5)
3018 mftb r5
3019 subf r5, r6, r5 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003020 std r3, VCPU_CUR_ACTIVITY(r4)
3021 std r5, VCPU_ACTIVITY_START(r4)
3022 blr
3023
3024/*
3025 * Accumulate time to one activity and start another.
3026 * r3 = pointer to new time accumulation struct, r4 = vcpu
3027 */
3028kvmhv_accumulate_time:
3029 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003030 ld r8, VCORE_TB_OFFSET_APPL(r5)
3031 ld r5, VCPU_CUR_ACTIVITY(r4)
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003032 ld r6, VCPU_ACTIVITY_START(r4)
3033 std r3, VCPU_CUR_ACTIVITY(r4)
3034 mftb r7
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003035 subf r7, r8, r7 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003036 std r7, VCPU_ACTIVITY_START(r4)
3037 cmpdi r5, 0
3038 beqlr
3039 subf r3, r6, r7
3040 ld r8, TAS_SEQCOUNT(r5)
3041 cmpdi r8, 0
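	/*
	 * cr0 from the comparison above is tested at the beq below: a zero
	 * sequence count means this is the first sample, so TAS_MIN is set
	 * unconditionally. Bumping the count to an odd value marks the
	 * update in progress, seqlock-style; it is bumped back to even once
	 * the total and min/max have been updated.
	 */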
3042 addi r8, r8, 1
3043 std r8, TAS_SEQCOUNT(r5)
3044 lwsync
3045 ld r7, TAS_TOTAL(r5)
3046 add r7, r7, r3
3047 std r7, TAS_TOTAL(r5)
3048 ld r6, TAS_MIN(r5)
3049 ld r7, TAS_MAX(r5)
3050 beq 3f
3051 cmpd r3, r6
3052 bge 1f
30533: std r3, TAS_MIN(r5)
30541: cmpd r3, r7
3055 ble 2f
3056 std r3, TAS_MAX(r5)
30572: lwsync
3058 addi r8, r8, 1
3059 std r8, TAS_SEQCOUNT(r5)
3060 blr
3061#endif