/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)		\
BEGIN_FTR_SECTION;			\
	extsw	reg, reg;		\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
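/*
 * Note: on ISA v3.00 (POWER9) the hypervisor decrementer is the full
 * 64-bit "large" decrementer, so its value is already sign-correct;
 * on earlier CPUs HDEC is only 32 bits wide, so it is sign-extended
 * here before any signed 64-bit comparison (negative == expired).
 */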

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
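/*
 * Note: these save slots are carved out of the top of the SFS-byte
 * stack frame allocated by kvmppc_hv_entry, growing down from SFS-4;
 * STACK_SLOT_TRAP is a 32-bit word, the others are doublewords.
 */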

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
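	/*
	 * Note: HSTATE_DECEXP holds the timebase value at which the host
	 * decrementer is due to expire, so DEC = expiry - current TB
	 * re-arms the host timer with whatever time remains.
	 */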
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have been set by cede or no-vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	63f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
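	/*
	 * Note: the rlwimi above leaves PECE0 set and PECE1 clear, so
	 * only an external interrupt (not the decrementer) can wake
	 * this offlined thread from nap.
	 */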
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  Hence the HMI keeps waking up
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them.  This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence
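	/*
	 * Note: this leaves only PECEDH (hypervisor doorbell) and PECE0
	 * (external interrupt) as wakeup causes, so the napping thread
	 * wakes when the primary sends it a doorbell to leave split
	 * mode, but not on decrementer or privileged-doorbell events.
	 */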

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b
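	/*
	 * Note: kvm->arch.need_tlb_flush is a cpumask with a bit per
	 * CPU (shared by the 4 threads of a core on P9, see above); a
	 * set bit means that CPU's TLB may hold stale entries for this
	 * LPID.  The ldarx/stdcx. loop clears only our bit, leaving
	 * other CPUs' bits intact.
	 */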

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
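	/*
	 * Note: mtspr TBU40 writes only the upper 40 bits of the
	 * timebase, leaving the low 24 bits ticking.  If the low bits
	 * wrapped between the two mftb reads above, adding 0x100 with
	 * addis (i.e. 2^24, the LSB of the 40-bit field) compensates.
	 */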

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:
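	/*
	 * Note: the PCR (Processor Compatibility Register) makes the
	 * guest appear to run on an older architecture level; a vcore
	 * PCR value of 0 means native mode, in which case nothing is
	 * written and the host value is left in place.
	 */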

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stw	r9, VCPU_XIVE_PUSHED(r4)
	eieio
no_xive:
#endif /* CONFIG_KVM_XICS */
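	/*
	 * Note: the stores above appear to target the thread's
	 * interrupt management area (TIMA) in the XIVE controller;
	 * stdcix/stwcix are cache-inhibited stores to its real address,
	 * usable with the MMU off.  Pushing the vcpu's saved state and
	 * CAM word into the OS queue word lets the hardware route this
	 * vcpu's interrupts directly to this hardware thread.
	 */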

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
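	/*
	 * Note: if an external interrupt is pending but the guest has
	 * MSR_EE clear, it cannot be injected directly; setting
	 * LPCR[MER] (Mediated External Request) below makes the
	 * hardware deliver an external interrupt to the guest as soon
	 * as the guest re-enables EE.
	 */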
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

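/*
 * Note: the 0x7fff value appears to serve as a canary: the HDSI exit
 * path can detect the case (seen on some POWER9 revisions) where the
 * hardware took the interrupt without updating HDSISR, because the
 * canary is then still present.
 */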
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
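	/*
	 * Note: the branch-to-self after hrfid is never executed; it
	 * just stops the CPU from speculatively fetching past the
	 * hrfid into whatever follows in the instruction stream.
	 */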
1175
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001176secondary_too_late:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001177 li r12, 0
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001178 cmpdi r4, 0
1179 beq 11f
Paul Mackerras6af27c82015-03-28 14:21:10 +11001180 stw r12, VCPU_TRAP(r4)
1181#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001182 addi r3, r4, VCPU_TB_RMEXIT
1183 bl kvmhv_accumulate_time
Paul Mackerras6af27c82015-03-28 14:21:10 +11001184#endif
Paul Mackerrasb6c295d2015-03-28 14:21:02 +1100118511: b kvmhv_switch_to_host
1186
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001187no_switch_exit:
1188 HMT_MEDIUM
1189 li r12, 0
1190 b 12f
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001191hdec_soon:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001192 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000119312: stw r12, VCPU_TRAP(r4)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001194 mr r9, r4
1195#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001196 addi r3, r4, VCPU_TB_RMEXIT
1197 bl kvmhv_accumulate_time
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001198#endif
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001199 b guest_bypass
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001200
Paul Mackerrasde56a942011-06-29 00:21:34 +00001201/******************************************************************************
1202 * *
1203 * Exit code *
1204 * *
1205 *****************************************************************************/
1206
1207/*
1208 * We come here from the first-level interrupt handlers.
1209 */
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301210 .globl kvmppc_interrupt_hv
1211kvmppc_interrupt_hv:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001212 /*
1213 * Register contents:
Nicholas Piggind3918e72016-12-22 04:29:25 +10001214 * R12 = (guest CR << 32) | interrupt vector
Paul Mackerrasde56a942011-06-29 00:21:34 +00001215 * R13 = PACA
Nicholas Piggind3918e72016-12-22 04:29:25 +10001216 * guest R12 saved in shadow VCPU SCRATCH0
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001217 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
Paul Mackerrasde56a942011-06-29 00:21:34 +00001218 * guest R13 saved in SPRN_SCRATCH0
1219 */
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001220 std r9, HSTATE_SCRATCH2(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10001221 lbz r9, HSTATE_IN_GUEST(r13)
1222 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1223 beq kvmppc_bad_host_intr
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301224#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1225 cmpwi r9, KVM_GUEST_MODE_GUEST
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001226 ld r9, HSTATE_SCRATCH2(r13)
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301227 beq kvmppc_interrupt_pr
1228#endif
Paul Mackerras44a3add2013-10-04 21:45:04 +10001229 /* We're now back in the host but in guest MMU context */
1230 li r9, KVM_GUEST_MODE_HOST_HV
1231 stb r9, HSTATE_IN_GUEST(r13)
1232
Paul Mackerrasde56a942011-06-29 00:21:34 +00001233 ld r9, HSTATE_KVM_VCPU(r13)
1234
1235 /* Save registers */
1236
Michael Neulingc75df6f2012-06-25 13:33:10 +00001237 std r0, VCPU_GPR(R0)(r9)
1238 std r1, VCPU_GPR(R1)(r9)
1239 std r2, VCPU_GPR(R2)(r9)
1240 std r3, VCPU_GPR(R3)(r9)
1241 std r4, VCPU_GPR(R4)(r9)
1242 std r5, VCPU_GPR(R5)(r9)
1243 std r6, VCPU_GPR(R6)(r9)
1244 std r7, VCPU_GPR(R7)(r9)
1245 std r8, VCPU_GPR(R8)(r9)
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001246 ld r0, HSTATE_SCRATCH2(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001247 std r0, VCPU_GPR(R9)(r9)
1248 std r10, VCPU_GPR(R10)(r9)
1249 std r11, VCPU_GPR(R11)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001250 ld r3, HSTATE_SCRATCH0(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001251 std r3, VCPU_GPR(R12)(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001252 /* CR is in the high half of r12 */
1253 srdi r4, r12, 32
Paul Mackerrasde56a942011-06-29 00:21:34 +00001254 stw r4, VCPU_CR(r9)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001255BEGIN_FTR_SECTION
1256 ld r3, HSTATE_CFAR(r13)
1257 std r3, VCPU_CFAR(r9)
1258END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001259BEGIN_FTR_SECTION
1260 ld r4, HSTATE_PPR(r13)
1261 std r4, VCPU_PPR(r9)
1262END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001263
1264 /* Restore R1/R2 so we can handle faults */
1265 ld r1, HSTATE_HOST_R1(r13)
1266 ld r2, PACATOC(r13)
1267
1268 mfspr r10, SPRN_SRR0
1269 mfspr r11, SPRN_SRR1
1270 std r10, VCPU_SRR0(r9)
1271 std r11, VCPU_SRR1(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001272 /* trap is in the low half of r12, clear CR from the high half */
1273 clrldi r12, r12, 32
Paul Mackerrasde56a942011-06-29 00:21:34 +00001274 andi. r0, r12, 2 /* need to read HSRR0/1? */
1275 beq 1f
1276 mfspr r10, SPRN_HSRR0
1277 mfspr r11, SPRN_HSRR1
1278 clrrdi r12, r12, 2
12791: std r10, VCPU_PC(r9)
1280 std r11, VCPU_MSR(r9)
1281
1282 GET_SCRATCH0(r3)
1283 mflr r4
Michael Neulingc75df6f2012-06-25 13:33:10 +00001284 std r3, VCPU_GPR(R13)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001285 std r4, VCPU_LR(r9)
1286
Paul Mackerrasde56a942011-06-29 00:21:34 +00001287 stw r12,VCPU_TRAP(r9)
1288
Paul Mackerras8b24e692017-06-26 15:45:51 +10001289 /*
1290 * Now that we have saved away SRR0/1 and HSRR0/1,
1291 * interrupts are recoverable in principle, so set MSR_RI.
1292 * This becomes important for relocation-on interrupts from
1293 * the guest, which we can get in radix mode on POWER9.
1294 */
1295 li r0, MSR_RI
1296 mtmsrd r0, 1
1297
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001298#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1299 addi r3, r9, VCPU_TB_RMINTR
1300 mr r4, r9
1301 bl kvmhv_accumulate_time
1302 ld r5, VCPU_GPR(R5)(r9)
1303 ld r6, VCPU_GPR(R6)(r9)
1304 ld r7, VCPU_GPR(R7)(r9)
1305 ld r8, VCPU_GPR(R8)(r9)
1306#endif
1307
Paul Mackerras4a157d62014-12-03 13:30:39 +11001308 /* Save HEIR (HV emulation assist reg) in emul_inst
Paul Mackerras697d3892011-12-12 12:36:37 +00001309 if this is an HEI (HV emulation interrupt, e40) */
1310 li r3,KVM_INST_FETCH_FAILED
Paul Mackerras2bf27602015-03-20 20:39:40 +11001311 stw r3,VCPU_LAST_INST(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001312 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1313 bne 11f
1314 mfspr r3,SPRN_HEIR
Paul Mackerras4a157d62014-12-03 13:30:39 +1100131511: stw r3,VCPU_HEIR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001316
1317 /* these are volatile across C function calls */
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001318#ifdef CONFIG_RELOCATABLE
1319 ld r3, HSTATE_SCRATCH1(r13)
1320 mtctr r3
1321#else
Paul Mackerras697d3892011-12-12 12:36:37 +00001322 mfctr r3
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001323#endif
Paul Mackerras697d3892011-12-12 12:36:37 +00001324 mfxer r4
1325 std r3, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001326 std r4, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001327
Paul Mackerras697d3892011-12-12 12:36:37 +00001328 /* If this is a page table miss then see if it's theirs or ours */
1329 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1330 beq kvmppc_hdsi
Paul Mackerras342d3db2011-12-12 12:38:05 +00001331 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1332 beq kvmppc_hisi
Paul Mackerras697d3892011-12-12 12:36:37 +00001333
Paul Mackerrasde56a942011-06-29 00:21:34 +00001334 /* See if this is a leftover HDEC interrupt */
1335 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1336 bne 2f
1337 mfspr r3,SPRN_HDEC
Paul Mackerrasa4faf2e2017-08-25 19:52:12 +10001338 EXTEND_HDEC(r3)
1339 cmpdi r3,0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001340 mr r4,r9
1341 bge fast_guest_return
Paul Mackerrasde56a942011-06-29 00:21:34 +000013422:
Paul Mackerras697d3892011-12-12 12:36:37 +00001343 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001344 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1345 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001346
Paul Mackerras66feed62015-03-28 14:21:12 +11001347 /* Hypervisor doorbell - exit only if host IPI flag set */
1348 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1349 bne 3f
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001350BEGIN_FTR_SECTION
1351 PPC_MSGSYNC
Nicholas Piggin2cde3712017-10-10 20:18:28 +10001352 lwsync
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001353END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
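	/*
	 * msgsync plus lwsync (POWER9) make stores done by the thread
	 * that sent the doorbell, such as setting host_ipi, visible
	 * before we test the flag below.
	 */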
Paul Mackerras66feed62015-03-28 14:21:12 +11001354 lbz r0, HSTATE_HOST_IPI(r13)
Gautham R. Shenoy06554d92015-08-07 17:41:20 +05301355 cmpwi r0, 0
Paul Mackerras66feed62015-03-28 14:21:12 +11001356 beq 4f
1357 b guest_exit_cont
13583:
Paul Mackerras769377f2017-02-15 14:30:17 +11001359 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1360 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1361 bne 14f
1362 mfspr r3, SPRN_HFSCR
1363 std r3, VCPU_HFSCR(r9)
1364 b guest_exit_cont
136514:
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001366 /* External interrupt? */
1367 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001368 bne+ guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001369
 1370 /* External interrupt: first check for host_ipi. If it is
 1371 * set, the host wants us out, so exit to the host now.
 1372 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001373 bl kvmppc_read_intr
Suresh Warrier37f55d32016-08-19 15:35:46 +10001374
1375 /*
1376 * Restore the active volatile registers after returning from
1377 * a C function.
1378 */
1379 ld r9, HSTATE_KVM_VCPU(r13)
1380 li r12, BOOK3S_INTERRUPT_EXTERNAL
1381
1382 /*
1383 * kvmppc_read_intr return codes:
1384 *
1385 * Exit to host (r3 > 0)
1386 * 1 An interrupt is pending that needs to be handled by the host
1387 * Exit guest and return to host by branching to guest_exit_cont
1388 *
Suresh Warrierf7af5202016-08-19 15:35:52 +10001389 * 2 Passthrough that needs completion in the host
1390 * Exit guest and return to host by branching to guest_exit_cont
1391 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1392 * to indicate to the host to complete handling the interrupt
1393 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001394 * Before returning to guest, we check if any CPU is heading out
 1395 * to the host and if so, we head out also. If no CPUs are heading
 1396 * out, we fall through and check the return values <= 0 below.
1397 *
1398 * Return to guest (r3 <= 0)
1399 * 0 No external interrupt is pending
1400 * -1 A guest wakeup IPI (which has now been cleared)
1401 * In either case, we return to guest to deliver any pending
1402 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001403 *
1404 * -2 A PCI passthrough external interrupt was handled
1405 * (interrupt was delivered directly to guest)
1406 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001407 */
1408
Suresh Warrierf7af5202016-08-19 15:35:52 +10001409 cmpdi r3, 1
1410 ble 1f
1411
1412 /* Return code = 2 */
1413 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1414 stw r12, VCPU_TRAP(r9)
1415 b guest_exit_cont
1416
14171: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001418 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001419 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001420
Suresh Warrier37f55d32016-08-19 15:35:46 +10001421 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110014224: ld r5, HSTATE_KVM_VCORE(r13)
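	/*
	 * An entry/exit value >= 0x100 means some thread has already set
	 * its bit in the exit map, so go out to the host instead of
	 * re-entering the guest.
	 */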
Paul Mackerras4619ac82013-04-17 20:31:41 +00001423 lwz r0, VCORE_ENTRY_EXIT(r5)
1424 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001425 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001426 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001427
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001428guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerras43ff3f62018-01-11 14:31:43 +11001429 /* Save more register state */
1430 mfdar r6
1431 mfdsisr r7
1432 std r6, VCPU_DAR(r9)
1433 stw r7, VCPU_DSISR(r9)
1434 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1435 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1436 beq mc_cont
1437 std r6, VCPU_FAULT_DAR(r9)
1438 stw r7, VCPU_FAULT_DSISR(r9)
1439
1440 /* See if it is a machine check */
1441 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1442 beq machine_check_realmode
1443mc_cont:
1444#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1445 addi r3, r9, VCPU_TB_RMEXIT
1446 mr r4, r9
1447 bl kvmhv_accumulate_time
1448#endif
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001449#ifdef CONFIG_KVM_XICS
1450 /* We are exiting, pull the VP from the XIVE */
1451 lwz r0, VCPU_XIVE_PUSHED(r9)
1452 cmpwi cr0, r0, 0
1453 beq 1f
1454 li r7, TM_SPC_PULL_OS_CTX
1455 li r6, TM_QW1_OS
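	/*
	 * The TIMA has both a virtually-mapped and a real-mode (physical,
	 * cache-inhibited) mapping; check MSR_IR to pick the one that is
	 * usable right now.
	 */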
1456 mfmsr r0
1457 andi. r0, r0, MSR_IR /* in real mode? */
1458 beq 2f
1459 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1460 cmpldi cr0, r10, 0
1461 beq 1f
 1462 /* First load to pull the context; we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001463 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001464 lwzx r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001465 /* Second load to recover the context state (Words 0 and 1) */
1466 ldx r11, r6, r10
1467 b 3f
14682: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1469 cmpldi cr0, r10, 0
1470 beq 1f
 1471 /* First load to pull the context; we ignore the value */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001472 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001473 lwzcix r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001474 /* Second load to recover the context state (Words 0 and 1) */
1475 ldcix r11, r6, r10
14763: std r11, VCPU_XIVE_SAVED_STATE(r9)
1477 /* Fixup some of the state for the next load */
1478 li r10, 0
1479 li r0, 0xff
1480 stw r10, VCPU_XIVE_PUSHED(r9)
1481 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1482 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001483 eieio
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +100014841:
1485#endif /* CONFIG_KVM_XICS */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001486
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001487 /* For hash guest, read the guest SLB and save it away */
1488 ld r5, VCPU_KVM(r9)
1489 lbz r0, KVM_RADIX(r5)
1490 li r5, 0
1491 cmpwi r0, 0
1492 bne 3f /* for radix, save 0 entries */
1493 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1494 mtctr r0
1495 li r6,0
1496 addi r7,r9,VCPU_SLB
14971: slbmfee r8,r6
1498 andis. r0,r8,SLB_ESID_V@h
1499 beq 2f
1500 add r8,r8,r6 /* put index in */
1501 slbmfev r3,r6
1502 std r8,VCPU_SLB_E(r7)
1503 std r3,VCPU_SLB_V(r7)
1504 addi r7,r7,VCPU_SLB_SIZE
1505 addi r5,r5,1
15062: addi r6,r6,1
1507 bdnz 1b
1508 /* Finally clear out the SLB */
1509 li r0,0
1510 slbmte r0,r0
1511 slbia
1512 ptesync
15133: stw r5,VCPU_SLB_MAX(r9)
1514
1515guest_bypass:
Gautham R. Shenoy7e022e72015-05-21 13:57:04 +05301516 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001517 /* Increment exit count, poke other threads to exit */
1518 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001519 nop
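	/* Reload the vcpu pointer and trap number clobbered by the C call */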
1520 ld r9, HSTATE_KVM_VCPU(r13)
1521 lwz r12, VCPU_TRAP(r9)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001522
Paul Mackerrasec257162015-06-24 21:18:03 +10001523 /* Stop others sending VCPU interrupts to this physical CPU */
1524 li r0, -1
1525 stw r0, VCPU_CPU(r9)
1526 stw r0, VCPU_THREAD_CPU(r9)
1527
Paul Mackerrasde56a942011-06-29 00:21:34 +00001528 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001529 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001530 stw r6,VCPU_CTRL(r9)
1531 andi. r0,r6,1
1532 bne 4f
1533 ori r6,r6,1
1534 mtspr SPRN_CTRLT,r6
15354:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001536 /*
1537 * Save the guest PURR/SPURR
1538 */
1539 mfspr r5,SPRN_PURR
1540 mfspr r6,SPRN_SPURR
1541 ld r7,VCPU_PURR(r9)
1542 ld r8,VCPU_SPURR(r9)
1543 std r5,VCPU_PURR(r9)
1544 std r6,VCPU_SPURR(r9)
1545 subf r5,r7,r5
1546 subf r6,r8,r6
1547
1548 /*
1549 * Restore host PURR/SPURR and add guest times
1550 * so that the time in the guest gets accounted.
1551 */
1552 ld r3,HSTATE_PURR(r13)
1553 ld r4,HSTATE_SPURR(r13)
1554 add r3,r3,r5
1555 add r4,r4,r6
1556 mtspr SPRN_PURR,r3
1557 mtspr SPRN_SPURR,r4
1558
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001559 /* Save DEC */
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10001560 ld r3, HSTATE_KVM_VCORE(r13)
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001561 mfspr r5,SPRN_DEC
1562 mftb r6
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10001563 /* On P9, if the guest has large decr enabled, don't sign extend */
1564BEGIN_FTR_SECTION
1565 ld r4, VCORE_LPCR(r3)
1566 andis. r4, r4, LPCR_LD@h
1567 bne 16f
1568END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001569 extsw r5,r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000157016: add r5,r5,r6
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001571 /* r5 is a guest timebase value here, convert to host TB */
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001572 ld r4,VCORE_TB_OFFSET(r3)
1573 subf r5,r4,r5
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001574 std r5,VCPU_DEC_EXPIRES(r9)
1575
Michael Neulingb005255e2014-01-08 21:25:21 +11001576BEGIN_FTR_SECTION
1577 b 8f
1578END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001579 /* Save POWER8-specific registers */
1580 mfspr r5, SPRN_IAMR
1581 mfspr r6, SPRN_PSPB
1582 mfspr r7, SPRN_FSCR
1583 std r5, VCPU_IAMR(r9)
1584 stw r6, VCPU_PSPB(r9)
1585 std r7, VCPU_FSCR(r9)
1586 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001587 mfspr r7, SPRN_TAR
1588 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001589 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001590 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001591 std r8, VCPU_EBBHR(r9)
1592 mfspr r5, SPRN_EBBRR
1593 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001594 mfspr r7, SPRN_PID
1595 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001596 std r5, VCPU_EBBRR(r9)
1597 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001598 stw r7, VCPU_GUEST_PID(r9)
1599 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001600BEGIN_FTR_SECTION
1601 mfspr r5, SPRN_TCSCR
1602 mfspr r6, SPRN_ACOP
1603 mfspr r7, SPRN_CSIGR
1604 mfspr r8, SPRN_TACR
1605 std r5, VCPU_TCSCR(r9)
1606 std r6, VCPU_ACOP(r9)
1607 std r7, VCPU_CSIGR(r9)
1608 std r8, VCPU_TACR(r9)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001609FTR_SECTION_ELSE
1610 mfspr r5, SPRN_TIDR
1611 mfspr r6, SPRN_PSSCR
1612 std r5, VCPU_TID(r9)
1613 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1614 rotldi r6, r6, 60
1615 std r6, VCPU_PSSCR(r9)
Paul Mackerras769377f2017-02-15 14:30:17 +11001616 /* Restore host HFSCR value */
1617 ld r7, STACK_SLOT_HFSCR(r1)
1618 mtspr SPRN_HFSCR, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001619ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001620 /*
1621 * Restore various registers to 0, where non-zero values
1622 * set by the guest could disrupt the host.
1623 */
1624 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001625 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001626 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001627BEGIN_FTR_SECTION
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001628 mtspr SPRN_IAMR, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001629 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001630 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1631 li r0, 1
1632 sldi r0, r0, 31
1633 mtspr SPRN_MMCRS, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001634END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Michael Neulingb005255e2014-01-08 21:25:21 +110016358:
1636
Paul Mackerrasde56a942011-06-29 00:21:34 +00001637 /* Save and reset AMR and UAMOR before turning on the MMU */
1638 mfspr r5,SPRN_AMR
1639 mfspr r6,SPRN_UAMOR
1640 std r5,VCPU_AMR(r9)
1641 std r6,VCPU_UAMOR(r9)
1642 li r6,0
1643 mtspr SPRN_AMR,r6
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001644 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001645
Paul Mackerrasde56a942011-06-29 00:21:34 +00001646 /* Switch DSCR back to host value */
1647 mfspr r8, SPRN_DSCR
1648 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001649 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001650 mtspr SPRN_DSCR, r7
1651
1652 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001653 std r14, VCPU_GPR(R14)(r9)
1654 std r15, VCPU_GPR(R15)(r9)
1655 std r16, VCPU_GPR(R16)(r9)
1656 std r17, VCPU_GPR(R17)(r9)
1657 std r18, VCPU_GPR(R18)(r9)
1658 std r19, VCPU_GPR(R19)(r9)
1659 std r20, VCPU_GPR(R20)(r9)
1660 std r21, VCPU_GPR(R21)(r9)
1661 std r22, VCPU_GPR(R22)(r9)
1662 std r23, VCPU_GPR(R23)(r9)
1663 std r24, VCPU_GPR(R24)(r9)
1664 std r25, VCPU_GPR(R25)(r9)
1665 std r26, VCPU_GPR(R26)(r9)
1666 std r27, VCPU_GPR(R27)(r9)
1667 std r28, VCPU_GPR(R28)(r9)
1668 std r29, VCPU_GPR(R29)(r9)
1669 std r30, VCPU_GPR(R30)(r9)
1670 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001671
1672 /* Save SPRGs */
1673 mfspr r3, SPRN_SPRG0
1674 mfspr r4, SPRN_SPRG1
1675 mfspr r5, SPRN_SPRG2
1676 mfspr r6, SPRN_SPRG3
1677 std r3, VCPU_SPRG0(r9)
1678 std r4, VCPU_SPRG1(r9)
1679 std r5, VCPU_SPRG2(r9)
1680 std r6, VCPU_SPRG3(r9)
1681
Paul Mackerras89436332012-03-02 01:38:23 +00001682 /* save FP state */
1683 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001684 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001685
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001686#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1687BEGIN_FTR_SECTION
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001688 /*
1689 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1690 */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10001691 bl kvmppc_save_tm
1692END_FTR_SECTION_IFSET(CPU_FTR_TM)
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001693#endif
1694
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001695 /* Increment yield count if they have a VPA */
1696 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1697 cmpdi r8, 0
1698 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001699 li r4, LPPACA_YIELDCOUNT
1700 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001701 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001702 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001703 li r3, 1
1704 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000170525:
1706 /* Save PMU registers if requested */
1707 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001708BEGIN_FTR_SECTION
1709 /*
1710 * POWER8 seems to have a hardware bug where setting
1711 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1712 * when some counters are already negative doesn't seem
1713 * to cause a performance monitor alert (and hence interrupt).
1714 * The effect of this is that when saving the PMU state,
1715 * if there is no PMU alert pending when we read MMCR0
1716 * before freezing the counters, but one becomes pending
1717 * before we read the counters, we lose it.
1718 * To work around this, we need a way to freeze the counters
1719 * before reading MMCR0. Normally, freezing the counters
1720 * is done by writing MMCR0 (to set MMCR0[FC]) which
 1721 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1722 * we can also freeze the counters using MMCR2, by writing
1723 * 1s to all the counter freeze condition bits (there are
1724 * 9 bits each for 6 counters).
1725 */
1726 li r3, -1 /* set all freeze bits */
1727 clrrdi r3, r3, 10
1728 mfspr r10, SPRN_MMCR2
1729 mtspr SPRN_MMCR2, r3
1730 isync
1731END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001732 li r3, 1
1733 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1734 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1735 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001736 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001737 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001738 li r7, 0
1739 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001740 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001741 beq 21f /* if no VPA, save PMU stuff anyway */
1742 lbz r7, LPPACA_PMCINUSE(r8)
1743 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1744 bne 21f
1745 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1746 b 22f
174721: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001748 mfspr r7, SPRN_SIAR
1749 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001750 std r4, VCPU_MMCR(r9)
1751 std r5, VCPU_MMCR + 8(r9)
1752 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001753BEGIN_FTR_SECTION
1754 std r10, VCPU_MMCR + 24(r9)
1755END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001756 std r7, VCPU_SIAR(r9)
1757 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001758 mfspr r3, SPRN_PMC1
1759 mfspr r4, SPRN_PMC2
1760 mfspr r5, SPRN_PMC3
1761 mfspr r6, SPRN_PMC4
1762 mfspr r7, SPRN_PMC5
1763 mfspr r8, SPRN_PMC6
1764 stw r3, VCPU_PMC(r9)
1765 stw r4, VCPU_PMC + 4(r9)
1766 stw r5, VCPU_PMC + 8(r9)
1767 stw r6, VCPU_PMC + 12(r9)
1768 stw r7, VCPU_PMC + 16(r9)
1769 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001770BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001771 mfspr r5, SPRN_SIER
Paul Mackerras83677f52016-11-16 22:33:27 +11001772 std r5, VCPU_SIER(r9)
1773BEGIN_FTR_SECTION_NESTED(96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001774 mfspr r6, SPRN_SPMC1
1775 mfspr r7, SPRN_SPMC2
1776 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001777 stw r6, VCPU_PMC + 24(r9)
1778 stw r7, VCPU_PMC + 28(r9)
1779 std r8, VCPU_MMCR + 32(r9)
1780 lis r4, 0x8000
1781 mtspr SPRN_MMCRS, r4
Paul Mackerras83677f52016-11-16 22:33:27 +11001782END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001783END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000178422:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001785
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001786 /* Restore host values of some registers */
1787BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001788 ld r5, STACK_SLOT_CIABR(r1)
1789 ld r6, STACK_SLOT_DAWR(r1)
1790 ld r7, STACK_SLOT_DAWRX(r1)
1791 mtspr SPRN_CIABR, r5
1792 mtspr SPRN_DAWR, r6
1793 mtspr SPRN_DAWRX, r7
1794END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1795BEGIN_FTR_SECTION
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001796 ld r5, STACK_SLOT_TID(r1)
1797 ld r6, STACK_SLOT_PSSCR(r1)
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001798 ld r7, STACK_SLOT_PID(r1)
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001799 ld r8, STACK_SLOT_IAMR(r1)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001800 mtspr SPRN_TIDR, r5
1801 mtspr SPRN_PSSCR, r6
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001802 mtspr SPRN_PID, r7
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001803 mtspr SPRN_IAMR, r8
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001804END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001805
1806#ifdef CONFIG_PPC_RADIX_MMU
1807 /*
 1808 * Are we running hash or radix?
1809 */
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001810 ld r5, VCPU_KVM(r9)
1811 lbz r0, KVM_RADIX(r5)
1812 cmpwi cr2, r0, 0
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001813 beq cr2, 4f
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001814
1815 /* Radix: Handle the case where the guest used an illegal PID */
1816 LOAD_REG_ADDR(r4, mmu_base_pid)
1817 lwz r3, VCPU_GUEST_PID(r9)
1818 lwz r5, 0(r4)
1819 cmpw cr0,r3,r5
1820 blt 2f
1821
1822 /*
1823 * Illegal PID, the HW might have prefetched and cached in the TLB
1824 * some translations for the LPID 0 / guest PID combination which
1825 * Linux doesn't know about, so we need to flush that PID out of
1826 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1827 * the right context.
1828 */
1829 li r0,0
1830 mtspr SPRN_LPID,r0
1831 isync
1832
1833 /* Then do a congruence class local flush */
1834 ld r6,VCPU_KVM(r9)
1835 lwz r0,KVM_TLB_SETS(r6)
1836 mtctr r0
1837 li r7,0x400 /* IS field = 0b01 */
1838 ptesync
1839 sldi r0,r3,32 /* RS has PID */
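	/*
	 * tlbiel with RIC=2, PRS=1, R=1 invalidates all entries for this
	 * PID in one congruence class; loop over KVM_TLB_SETS classes,
	 * stepping the set number field of RB (r7) each iteration.
	 */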
18401: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1841 addi r7,r7,0x1000
1842 bdnz 1b
1843 ptesync
1844
18452: /* Flush the ERAT on radix P9 DD1 guest exit */
Paul Mackerrasf11f6f72017-01-30 21:21:52 +11001846BEGIN_FTR_SECTION
1847 PPC_INVALIDATE_ERAT
1848END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
Paul Mackerras6964e6a2018-01-11 14:51:02 +110018494:
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001850#endif /* CONFIG_PPC_RADIX_MMU */
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001851
Paul Mackerrasde56a942011-06-29 00:21:34 +00001852 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001853 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001854 * We don't have to lock against tlbies but we do
1855 * have to coordinate the hardware threads.
1856 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001857kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001858 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001859 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001860 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1861 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001862 cmpwi r3,0
1863 beq 15f
1864 HMT_LOW
186513: lbz r3,VCORE_IN_GUEST(r5)
1866 cmpwi r3,0
1867 bne 13b
1868 HMT_MEDIUM
1869 b 16f
1870
1871 /* Primary thread waits for all the secondaries to exit guest */
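	/*
	 * The low byte of VCORE_ENTRY_EXIT is a bitmap of threads that
	 * have entered the guest and the next byte a bitmap of threads
	 * that have exited; spin until the two are equal.
	 */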
187215: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001873 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001874 clrldi r3,r3,56
1875 cmpw r3,r0
1876 bne 15b
1877 isync
1878
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001879 /* Did we actually switch to the guest at all? */
1880 lbz r6, VCORE_IN_GUEST(r5)
1881 cmpwi r6, 0
1882 beq 19f
1883
Paul Mackerrasde56a942011-06-29 00:21:34 +00001884 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001885 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11001886BEGIN_FTR_SECTION
1887 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001888 li r8,LPID_RSVD /* switch to reserved LPID */
1889 mtspr SPRN_LPID,r8
1890 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11001891 mtspr SPRN_SDR1,r6 /* switch to host page table */
1892END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001893 mtspr SPRN_LPID,r7
1894 isync
1895
Michael Neulingb005255e2014-01-08 21:25:21 +11001896BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001897 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11001898 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001899 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11001900 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10001901 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11001902 /* clear DPDES so we don't get guest doorbells in the host */
1903 li r8, 0
1904 mtspr SPRN_DPDES, r8
1905END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1906
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301907 /* If HMI, call kvmppc_realmode_hmi_handler() */
1908 cmpwi r12, BOOK3S_INTERRUPT_HMI
1909 bne 27f
1910 bl kvmppc_realmode_hmi_handler
1911 nop
1912 li r12, BOOK3S_INTERRUPT_HMI
1913 /*
1914 * At this point kvmppc_realmode_hmi_handler would have resync-ed
1915 * the TB. Hence it is not required to subtract guest timebase
1916 * offset from timebase. So, skip it.
1917 *
1918 * Also, do not call kvmppc_subcore_exit_guest() because it has
1919 * been invoked as part of kvmppc_realmode_hmi_handler().
1920 */
1921 b 30f
1922
192327:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001924 /* Subtract timebase offset from timebase */
1925 ld r8,VCORE_TB_OFFSET(r5)
1926 cmpdi r8,0
1927 beq 17f
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001928 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001929 subf r8,r8,r6
1930 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1931 mftb r7 /* check if lower 24 bits overflowed */
1932 clrldi r6,r6,40
1933 clrldi r7,r7,40
1934 cmpld r7,r6
1935 bge 17f
1936 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1937 mtspr SPRN_TBU40,r8
1938
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +0530193917: bl kvmppc_subcore_exit_guest
1940 nop
194130: ld r5,HSTATE_KVM_VCORE(r13)
1942 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1943
Paul Mackerrasde56a942011-06-29 00:21:34 +00001944 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05301945 ld r0, VCORE_PCR(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001946 cmpdi r0, 0
1947 beq 18f
1948 li r0, 0
1949 mtspr SPRN_PCR, r0
195018:
1951 /* Signal secondary CPUs to continue */
1952 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000195319: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001954 mtspr SPRN_HDEC,r8
1955
Paul Mackerrasc0101502017-10-19 14:11:23 +1100195616:
1957BEGIN_FTR_SECTION
1958 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
1959 ld r3, HSTATE_SPLIT_MODE(r13)
1960 cmpdi r3, 0
1961 beq 47f
1962 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
1963 cmpwi r8, 0
1964 beq 47f
1965 stw r12, STACK_SLOT_TRAP(r1)
1966 bl kvmhv_p9_restore_lpcr
1967 nop
1968 lwz r12, STACK_SLOT_TRAP(r1)
1969 b 48f
197047:
1971END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1972 ld r8,KVM_HOST_LPCR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001973 mtspr SPRN_LPCR,r8
1974 isync
Paul Mackerrasc0101502017-10-19 14:11:23 +1100197548:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001976 /* load host SLB entries */
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001977BEGIN_MMU_FTR_SECTION
1978 b 0f
1979END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001980 ld r8,PACA_SLBSHADOWPTR(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001981
1982 .rept SLB_NUM_BOLTED
Alexander Graf0865a582014-06-11 10:36:17 +02001983 li r3, SLBSHADOW_SAVEAREA
1984 LDX_BE r5, r8, r3
1985 addi r3, r3, 8
1986 LDX_BE r6, r8, r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00001987 andis. r7,r5,SLB_ESID_V@h
1988 beq 1f
1989 slbmte r6,r5
19901: addi r8,r8,16
1991 .endr
Paul Mackerrasf4c51f82017-01-30 21:21:45 +110019920:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001993#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1994 /* Finish timing, if we have a vcpu */
1995 ld r4, HSTATE_KVM_VCPU(r13)
1996 cmpdi r4, 0
1997 li r3, 0
1998 beq 2f
1999 bl kvmhv_accumulate_time
20002:
2001#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00002002 /* Unset guest mode */
2003 li r0, KVM_GUEST_MODE_NONE
2004 stb r0, HSTATE_IN_GUEST(r13)
2005
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10002006 ld r0, SFS+PPC_LR_STKOFF(r1)
2007 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10002008 mtlr r0
2009 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002010
Paul Mackerras697d3892011-12-12 12:36:37 +00002011/*
2012 * Check whether an HDSI is an HPTE not found fault or something else.
2013 * If it is an HPTE not found fault that is due to the guest accessing
 2014 * a page that it has mapped but which we have paged out, then
2015 * we continue on with the guest exit path. In all other cases,
2016 * reflect the HDSI to the guest as a DSI.
2017 */
2018kvmppc_hdsi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002019 ld r3, VCPU_KVM(r9)
2020 lbz r0, KVM_RADIX(r3)
Paul Mackerras697d3892011-12-12 12:36:37 +00002021 mfspr r4, SPRN_HDAR
2022 mfspr r6, SPRN_HDSISR
Michael Neulinge001fa72017-09-15 15:26:14 +10002023BEGIN_FTR_SECTION
2024 /* Look for DSISR canary. If we find it, retry instruction */
2025 cmpdi r6, 0x7fff
2026 beq 6f
2027END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2028 cmpwi r0, 0
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002029 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
Paul Mackerras4cf302b2011-12-12 12:38:51 +00002030 /* HPTE not found fault or protection fault? */
2031 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00002032 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002033 andi. r0, r11, MSR_DR /* data relocation enabled? */
2034 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002035BEGIN_FTR_SECTION
2036 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2037 b 4f
2038END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras697d3892011-12-12 12:36:37 +00002039 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002040 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002041 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2042 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000020434: std r4, VCPU_FAULT_DAR(r9)
2044 stw r6, VCPU_FAULT_DSISR(r9)
2045
2046 /* Search the hash table. */
2047 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002048 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002049 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00002050 ld r9, HSTATE_KVM_VCPU(r13)
2051 ld r10, VCPU_PC(r9)
2052 ld r11, VCPU_MSR(r9)
2053 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2054 cmpdi r3, 0 /* retry the instruction */
2055 beq 6f
2056 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002057 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00002058 cmpdi r3, -2 /* MMIO emulation; need instr word */
2059 beq 2f
2060
Paul Mackerrascf29b212015-10-27 16:10:20 +11002061 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00002062 ld r4, VCPU_FAULT_DAR(r9)
2063 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110020641: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00002065 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110020667: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00002067 mtspr SPRN_SRR0, r10
2068 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002069 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002070 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002071fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000020726: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10002073 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00002074 mtctr r7
2075 mtxer r8
2076 mr r4, r9
2077 b fast_guest_return
2078
20793: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2080 ld r5, KVM_VRMA_SLB_V(r5)
2081 b 4b
2082
2083 /* If this is for emulated MMIO, load the instruction word */
20842: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2085
2086 /* Set guest mode to 'jump over instruction' so if lwz faults
2087 * we'll just continue at the next IP. */
2088 li r0, KVM_GUEST_MODE_SKIP
2089 stb r0, HSTATE_IN_GUEST(r13)
2090
2091 /* Do the access with MSR:DR enabled */
2092 mfmsr r3
2093 ori r4, r3, MSR_DR /* Enable paging for data */
2094 mtmsrd r4
2095 lwz r8, 0(r10)
2096 mtmsrd r3
2097
2098 /* Store the result */
2099 stw r8, VCPU_LAST_INST(r9)
2100
2101 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002102 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00002103 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002104 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00002105
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002106.Lradix_hdsi:
2107 std r4, VCPU_FAULT_DAR(r9)
2108 stw r6, VCPU_FAULT_DSISR(r9)
2109.Lradix_hisi:
2110 mfspr r5, SPRN_ASDR
2111 std r5, VCPU_FAULT_GPA(r9)
2112 b guest_exit_cont
2113
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002114/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002115 * Similarly for an HISI, reflect it to the guest as an ISI unless
2116 * it is an HPTE not found fault for a page that we have paged out.
2117 */
2118kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002119 ld r3, VCPU_KVM(r9)
2120 lbz r0, KVM_RADIX(r3)
2121 cmpwi r0, 0
2122 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002123 andis. r0, r11, SRR1_ISI_NOPT@h
2124 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002125 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2126 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002127BEGIN_FTR_SECTION
2128 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2129 b 4f
2130END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002131 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002132 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002133 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2134 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000021354:
2136 /* Search the hash table. */
2137 mr r3, r9 /* vcpu pointer */
2138 mr r4, r10
2139 mr r6, r11
2140 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002141 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002142 ld r9, HSTATE_KVM_VCPU(r13)
2143 ld r10, VCPU_PC(r9)
2144 ld r11, VCPU_MSR(r9)
2145 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2146 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002147 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002148 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002149 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002150
Paul Mackerrascf29b212015-10-27 16:10:20 +11002151 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002152 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110021531: li r0, BOOK3S_INTERRUPT_INST_STORAGE
21547: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002155 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002156 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002157 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002158 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002159
21603: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2161 ld r5, KVM_VRMA_SLB_V(r6)
2162 b 4b
2163
2164/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002165 * Try to handle an hcall in real mode.
2166 * Returns to the guest if we handle it, or continues on up to
2167 * the kernel if we can't (i.e. if we don't have a handler for
2168 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002169 *
2170 * r5 - r8 contain hcall args,
2171 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002172 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002173hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00002174 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002175 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08002176 /* sc 1 from userspace - reflect to guest syscall */
2177 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002178 clrrdi r3,r3,2
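	/*
	 * hcall numbers are multiples of 4, and hcall_real_table holds
	 * one 32-bit entry per possible hcall, so the hcall number with
	 * its low two bits cleared is also the byte offset into the table.
	 */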
2179 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002180 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10002181 /* See if this hcall is enabled for in-kernel handling */
2182 ld r4, VCPU_KVM(r9)
2183 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2184 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2185 add r4, r4, r0
2186 ld r0, KVM_ENABLED_HCALLS(r4)
2187 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2188 srd r0, r0, r4
2189 andi. r0, r0, 1
2190 beq guest_exit_cont
2191 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002192 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10002193 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002194 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002195 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10002196 add r12,r3,r4
2197 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002198 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002199 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002200 bctrl
2201 cmpdi r3,H_TOO_HARD
2202 beq hcall_real_fallback
2203 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00002204 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002205 ld r10,VCPU_PC(r4)
2206 ld r11,VCPU_MSR(r4)
2207 b fast_guest_return
2208
Liu Ping Fan27025a62013-11-19 14:12:48 +08002209sc_1_fast_return:
2210 mtspr SPRN_SRR0,r10
2211 mtspr SPRN_SRR1,r11
2212 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11002213 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08002214 mr r4,r9
2215 b fast_guest_return
2216
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002217 /* We've attempted a real mode hcall, but the handler has punted
 2218 * it back to userspace. We need to restore some clobbered volatiles
 2219 * before resuming the pass-it-to-qemu path */
2220hcall_real_fallback:
2221 li r12,BOOK3S_INTERRUPT_SYSCALL
2222 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002223
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002224 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002225
2226 .globl hcall_real_table
2227hcall_real_table:
2228 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002229 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2230 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2231 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10002232 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2233 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002234 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2235 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002236 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002237 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002238 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002239 .long 0 /* 0x2c */
2240 .long 0 /* 0x30 */
2241 .long 0 /* 0x34 */
2242 .long 0 /* 0x38 */
2243 .long 0 /* 0x3c */
2244 .long 0 /* 0x40 */
2245 .long 0 /* 0x44 */
2246 .long 0 /* 0x48 */
2247 .long 0 /* 0x4c */
2248 .long 0 /* 0x50 */
2249 .long 0 /* 0x54 */
2250 .long 0 /* 0x58 */
2251 .long 0 /* 0x5c */
2252 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002253#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002254 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2255 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2256 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002257 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002258 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002259#else
2260 .long 0 /* 0x64 - H_EOI */
2261 .long 0 /* 0x68 - H_CPPR */
2262 .long 0 /* 0x6c - H_IPI */
2263 .long 0 /* 0x70 - H_IPOLL */
2264 .long 0 /* 0x74 - H_XIRR */
2265#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002266 .long 0 /* 0x78 */
2267 .long 0 /* 0x7c */
2268 .long 0 /* 0x80 */
2269 .long 0 /* 0x84 */
2270 .long 0 /* 0x88 */
2271 .long 0 /* 0x8c */
2272 .long 0 /* 0x90 */
2273 .long 0 /* 0x94 */
2274 .long 0 /* 0x98 */
2275 .long 0 /* 0x9c */
2276 .long 0 /* 0xa0 */
2277 .long 0 /* 0xa4 */
2278 .long 0 /* 0xa8 */
2279 .long 0 /* 0xac */
2280 .long 0 /* 0xb0 */
2281 .long 0 /* 0xb4 */
2282 .long 0 /* 0xb8 */
2283 .long 0 /* 0xbc */
2284 .long 0 /* 0xc0 */
2285 .long 0 /* 0xc4 */
2286 .long 0 /* 0xc8 */
2287 .long 0 /* 0xcc */
2288 .long 0 /* 0xd0 */
2289 .long 0 /* 0xd4 */
2290 .long 0 /* 0xd8 */
2291 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002292 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11002293 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002294 .long 0 /* 0xe8 */
2295 .long 0 /* 0xec */
2296 .long 0 /* 0xf0 */
2297 .long 0 /* 0xf4 */
2298 .long 0 /* 0xf8 */
2299 .long 0 /* 0xfc */
2300 .long 0 /* 0x100 */
2301 .long 0 /* 0x104 */
2302 .long 0 /* 0x108 */
2303 .long 0 /* 0x10c */
2304 .long 0 /* 0x110 */
2305 .long 0 /* 0x114 */
2306 .long 0 /* 0x118 */
2307 .long 0 /* 0x11c */
2308 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002309 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11002310 .long 0 /* 0x128 */
2311 .long 0 /* 0x12c */
2312 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002313 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002314 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11002315 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11002316 .long 0 /* 0x140 */
2317 .long 0 /* 0x144 */
2318 .long 0 /* 0x148 */
2319 .long 0 /* 0x14c */
2320 .long 0 /* 0x150 */
2321 .long 0 /* 0x154 */
2322 .long 0 /* 0x158 */
2323 .long 0 /* 0x15c */
2324 .long 0 /* 0x160 */
2325 .long 0 /* 0x164 */
2326 .long 0 /* 0x168 */
2327 .long 0 /* 0x16c */
2328 .long 0 /* 0x170 */
2329 .long 0 /* 0x174 */
2330 .long 0 /* 0x178 */
2331 .long 0 /* 0x17c */
2332 .long 0 /* 0x180 */
2333 .long 0 /* 0x184 */
2334 .long 0 /* 0x188 */
2335 .long 0 /* 0x18c */
2336 .long 0 /* 0x190 */
2337 .long 0 /* 0x194 */
2338 .long 0 /* 0x198 */
2339 .long 0 /* 0x19c */
2340 .long 0 /* 0x1a0 */
2341 .long 0 /* 0x1a4 */
2342 .long 0 /* 0x1a8 */
2343 .long 0 /* 0x1ac */
2344 .long 0 /* 0x1b0 */
2345 .long 0 /* 0x1b4 */
2346 .long 0 /* 0x1b8 */
2347 .long 0 /* 0x1bc */
2348 .long 0 /* 0x1c0 */
2349 .long 0 /* 0x1c4 */
2350 .long 0 /* 0x1c8 */
2351 .long 0 /* 0x1cc */
2352 .long 0 /* 0x1d0 */
2353 .long 0 /* 0x1d4 */
2354 .long 0 /* 0x1d8 */
2355 .long 0 /* 0x1dc */
2356 .long 0 /* 0x1e0 */
2357 .long 0 /* 0x1e4 */
2358 .long 0 /* 0x1e8 */
2359 .long 0 /* 0x1ec */
2360 .long 0 /* 0x1f0 */
2361 .long 0 /* 0x1f4 */
2362 .long 0 /* 0x1f8 */
2363 .long 0 /* 0x1fc */
2364 .long 0 /* 0x200 */
2365 .long 0 /* 0x204 */
2366 .long 0 /* 0x208 */
2367 .long 0 /* 0x20c */
2368 .long 0 /* 0x210 */
2369 .long 0 /* 0x214 */
2370 .long 0 /* 0x218 */
2371 .long 0 /* 0x21c */
2372 .long 0 /* 0x220 */
2373 .long 0 /* 0x224 */
2374 .long 0 /* 0x228 */
2375 .long 0 /* 0x22c */
2376 .long 0 /* 0x230 */
2377 .long 0 /* 0x234 */
2378 .long 0 /* 0x238 */
2379 .long 0 /* 0x23c */
2380 .long 0 /* 0x240 */
2381 .long 0 /* 0x244 */
2382 .long 0 /* 0x248 */
2383 .long 0 /* 0x24c */
2384 .long 0 /* 0x250 */
2385 .long 0 /* 0x254 */
2386 .long 0 /* 0x258 */
2387 .long 0 /* 0x25c */
2388 .long 0 /* 0x260 */
2389 .long 0 /* 0x264 */
2390 .long 0 /* 0x268 */
2391 .long 0 /* 0x26c */
2392 .long 0 /* 0x270 */
2393 .long 0 /* 0x274 */
2394 .long 0 /* 0x278 */
2395 .long 0 /* 0x27c */
2396 .long 0 /* 0x280 */
2397 .long 0 /* 0x284 */
2398 .long 0 /* 0x288 */
2399 .long 0 /* 0x28c */
2400 .long 0 /* 0x290 */
2401 .long 0 /* 0x294 */
2402 .long 0 /* 0x298 */
2403 .long 0 /* 0x29c */
2404 .long 0 /* 0x2a0 */
2405 .long 0 /* 0x2a4 */
2406 .long 0 /* 0x2a8 */
2407 .long 0 /* 0x2ac */
2408 .long 0 /* 0x2b0 */
2409 .long 0 /* 0x2b4 */
2410 .long 0 /* 0x2b8 */
2411 .long 0 /* 0x2bc */
2412 .long 0 /* 0x2c0 */
2413 .long 0 /* 0x2c4 */
2414 .long 0 /* 0x2c8 */
2415 .long 0 /* 0x2cc */
2416 .long 0 /* 0x2d0 */
2417 .long 0 /* 0x2d4 */
2418 .long 0 /* 0x2d8 */
2419 .long 0 /* 0x2dc */
2420 .long 0 /* 0x2e0 */
2421 .long 0 /* 0x2e4 */
2422 .long 0 /* 0x2e8 */
2423 .long 0 /* 0x2ec */
2424 .long 0 /* 0x2f0 */
2425 .long 0 /* 0x2f4 */
2426 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002427#ifdef CONFIG_KVM_XICS
2428 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2429#else
2430 .long 0 /* 0x2fc - H_XIRR_X*/
2431#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11002432 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002433 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002434hcall_real_table_end:
2435
Paul Mackerras8563bf52014-01-08 21:25:29 +11002436_GLOBAL(kvmppc_h_set_xdabr)
2437 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2438 beq 6f
2439 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2440 andc. r0, r5, r0
2441 beq 3f
24426: li r3, H_PARAMETER
2443 blr
2444
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002445_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002446 li r5, DABRX_USER | DABRX_KERNEL
24473:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002448BEGIN_FTR_SECTION
2449 b 2f
2450END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002451 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002452 stw r5, VCPU_DABRX(r3)
2453 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002454 /* Work around P7 bug where DABR can get corrupted on mtspr */
24551: mtspr SPRN_DABR,r4
2456 mfspr r5, SPRN_DABR
2457 cmpd r4, r5
2458 bne 1b
2459 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002460 li r3,0
2461 blr
2462
Paul Mackerras8563bf52014-01-08 21:25:29 +11002463 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
24642: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002465 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002466 clrrdi r4, r4, 3
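	/*
	 * The rlwimi's above merge the low-order DABR control bits
	 * (BT/DW/DR) into the corresponding DAWRX fields; the clrrdi
	 * then strips them, leaving the doubleword-aligned address in r4.
	 */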
2467 std r4, VCPU_DAWR(r3)
2468 std r5, VCPU_DAWRX(r3)
2469 mtspr SPRN_DAWR, r4
2470 mtspr SPRN_DAWRX, r5
2471 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002472 blr
2473
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002474_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002475 ori r11,r11,MSR_EE
2476 std r11,VCPU_MSR(r3)
2477 li r0,1
2478 stb r0,VCPU_CEDED(r3)
2479 sync /* order setting ceded vs. testing prodded */
2480 lbz r5,VCPU_PRODDED(r3)
2481 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002482 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002483 li r12,0 /* set trap to 0 to say hcall is handled */
2484 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002485 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002486 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002487
2488 /*
2489 * Set our bit in the bitmask of napping threads unless all the
2490 * other threads are already napping, in which case we send this
2491 * up to the host.
2492 */
2493 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002494 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002495 lwz r8,VCORE_ENTRY_EXIT(r5)
2496 clrldi r8,r8,56
2497 li r0,1
2498 sld r0,r0,r6
2499 addi r6,r5,VCORE_NAPPING_THREADS
250031: lwarx r4,0,r6
2501 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002502 cmpw r4,r8
2503 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002504 stwcx. r4,0,r6
2505 bne 31b
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002506 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002507 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002508 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002509 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002510 lwz r7,VCORE_ENTRY_EXIT(r5)
2511 cmpwi r7,0x100
2512 bge 33f /* another thread already exiting */
2513
2514/*
2515 * Although not specifically required by the architecture, POWER7
2516 * preserves the following registers in nap mode, even if an SMT mode
2517 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2518 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2519 */
2520 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002521 std r14, VCPU_GPR(R14)(r3)
2522 std r15, VCPU_GPR(R15)(r3)
2523 std r16, VCPU_GPR(R16)(r3)
2524 std r17, VCPU_GPR(R17)(r3)
2525 std r18, VCPU_GPR(R18)(r3)
2526 std r19, VCPU_GPR(R19)(r3)
2527 std r20, VCPU_GPR(R20)(r3)
2528 std r21, VCPU_GPR(R21)(r3)
2529 std r22, VCPU_GPR(R22)(r3)
2530 std r23, VCPU_GPR(R23)(r3)
2531 std r24, VCPU_GPR(R24)(r3)
2532 std r25, VCPU_GPR(R25)(r3)
2533 std r26, VCPU_GPR(R26)(r3)
2534 std r27, VCPU_GPR(R27)(r3)
2535 std r28, VCPU_GPR(R28)(r3)
2536 std r29, VCPU_GPR(R29)(r3)
2537 std r30, VCPU_GPR(R30)(r3)
2538 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002539
2540 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002541 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002542
Paul Mackerras93d17392016-06-22 15:52:55 +10002543#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2544BEGIN_FTR_SECTION
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002545 /*
2546 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2547 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002548 ld r9, HSTATE_KVM_VCPU(r13)
2549 bl kvmppc_save_tm
2550END_FTR_SECTION_IFSET(CPU_FTR_TM)
2551#endif
2552
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002553 /*
2554 * Set DEC to the smaller of DEC and HDEC, so that we wake
2555 * no later than the end of our timeslice (HDEC interrupts
2556 * don't wake us from nap).
2557 */
2558 mfspr r3, SPRN_DEC
2559 mfspr r4, SPRN_HDEC
2560 mftb r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10002561BEGIN_FTR_SECTION
2562 /* On P9 check whether the guest has large decrementer mode enabled */
2563 ld r6, HSTATE_KVM_VCORE(r13)
2564 ld r6, VCORE_LPCR(r6)
2565 andis. r6, r6, LPCR_LD@h
2566 bne 68f
2567END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras2f272462017-05-22 16:25:14 +10002568 extsw r3, r3
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000256968: EXTEND_HDEC(r4)
Paul Mackerras2f272462017-05-22 16:25:14 +10002570 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002571 ble 67f
2572 mtspr SPRN_DEC, r4
257367:
2574 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002575 add r3, r3, r5
2576 ld r4, HSTATE_KVM_VCPU(r13)
2577 ld r5, HSTATE_KVM_VCORE(r13)
2578 ld r6, VCORE_TB_OFFSET(r5)
2579 subf r3, r6, r3 /* convert to host TB value */
2580 std r3, VCPU_DEC_EXPIRES(r4)
2581
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002582#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2583 ld r4, HSTATE_KVM_VCPU(r13)
2584 addi r3, r4, VCPU_TB_CEDE
2585 bl kvmhv_accumulate_time
2586#endif
2587
Paul Mackerrasccc07772015-03-28 14:21:07 +11002588 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2589
Paul Mackerras19ccb762011-07-23 17:42:46 +10002590 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002591 * Take a nap until a decrementer, external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002592 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002593 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002594 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002595 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002596kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002597 mfspr r0, SPRN_CTRLF
2598 clrrdi r0, r0, 1
2599 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302600
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002601 li r0,1
2602 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002603 mfspr r5,SPRN_LPCR
2604 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002605BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002606 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002607 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002608END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002609
2610kvm_nap_sequence: /* desired LPCR value in r5 */
2611BEGIN_FTR_SECTION
2612 /*
2613 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2614 * enable state loss = 1 (allow SMT mode switch)
2615 * requested level = 0 (just stop dispatching)
2616 */
2617 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2618 mtspr SPRN_PSSCR, r3
2619 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2620 li r4, LPCR_PECE_HVEE@higher
2621 sldi r4, r4, 32
2622 or r5, r5, r4
2623END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002624 mtspr SPRN_LPCR,r5
2625 isync
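	/*
	 * Power-saving entry sequence (cf. IDLE_STATE_ENTER_SEQ in the
	 * idle code): store, ptesync, then a load and dependent compare
	 * to ensure all prior stores have drained before nap/stop.
	 */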
2626 li r0, 0
2627 std r0, HSTATE_SCRATCH0(r13)
2628 ptesync
2629 ld r0, HSTATE_SCRATCH0(r13)
26301: cmpd r0, r0
2631 bne 1b
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002632BEGIN_FTR_SECTION
Paul Mackerras19ccb762011-07-23 17:42:46 +10002633 nap
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002634FTR_SECTION_ELSE
2635 PPC_STOP
2636ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002637 b .
2638
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100263933: mr r4, r3
2640 li r3, 0
2641 li r12, 0
2642 b 34f
2643
Paul Mackerras19ccb762011-07-23 17:42:46 +10002644kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002645 /* get vcpu pointer */
2646 ld r4, HSTATE_KVM_VCPU(r13)
2647
Paul Mackerras19ccb762011-07-23 17:42:46 +10002648 /* Woken by external or decrementer interrupt */
2649 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002650
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002651#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2652 addi r3, r4, VCPU_TB_RMINTR
2653 bl kvmhv_accumulate_time
2654#endif
2655
Paul Mackerras93d17392016-06-22 15:52:55 +10002656#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2657BEGIN_FTR_SECTION
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002658 /*
2659 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2660 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002661 bl kvmppc_restore_tm
2662END_FTR_SECTION_IFSET(CPU_FTR_TM)
2663#endif
2664
Paul Mackerras19ccb762011-07-23 17:42:46 +10002665 /* load up FP state */
2666 bl kvmppc_load_fp
2667
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002668 /* Restore guest decrementer */
2669 ld r3, VCPU_DEC_EXPIRES(r4)
2670 ld r5, HSTATE_KVM_VCORE(r13)
2671 ld r6, VCORE_TB_OFFSET(r5)
2672 add r3, r3, r6 /* convert host TB to guest TB value */
2673 mftb r7
2674 subf r3, r7, r3
2675 mtspr SPRN_DEC, r3
2676
Paul Mackerras19ccb762011-07-23 17:42:46 +10002677 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002678 ld r14, VCPU_GPR(R14)(r4)
2679 ld r15, VCPU_GPR(R15)(r4)
2680 ld r16, VCPU_GPR(R16)(r4)
2681 ld r17, VCPU_GPR(R17)(r4)
2682 ld r18, VCPU_GPR(R18)(r4)
2683 ld r19, VCPU_GPR(R19)(r4)
2684 ld r20, VCPU_GPR(R20)(r4)
2685 ld r21, VCPU_GPR(R21)(r4)
2686 ld r22, VCPU_GPR(R22)(r4)
2687 ld r23, VCPU_GPR(R23)(r4)
2688 ld r24, VCPU_GPR(R24)(r4)
2689 ld r25, VCPU_GPR(R25)(r4)
2690 ld r26, VCPU_GPR(R26)(r4)
2691 ld r27, VCPU_GPR(R27)(r4)
2692 ld r28, VCPU_GPR(R28)(r4)
2693 ld r29, VCPU_GPR(R29)(r4)
2694 ld r30, VCPU_GPR(R30)(r4)
2695 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002696
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002697 /* Check the wake reason in SRR1 to see why we got here */
2698 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002699
Suresh Warrier37f55d32016-08-19 15:35:46 +10002700 /*
2701 * Restore volatile registers since we could have called a
 2702 * C routine in kvmppc_check_wake_reason.
 2703 * r4 = VCPU
 2704 * r3 tells us whether we need to return to the host or not;
 2705 * WARNING: r3 gets checked further down, so it must not be
 2706 * modified until that check is done.
2707 */
2708 ld r4, HSTATE_KVM_VCPU(r13)
2709
Paul Mackerras19ccb762011-07-23 17:42:46 +10002710 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100271134: ld r5,HSTATE_KVM_VCORE(r13)
2712 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002713 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002714 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002715 addi r6,r5,VCORE_NAPPING_THREADS
271632: lwarx r7,0,r6
2717 andc r7,r7,r0
2718 stwcx. r7,0,r6
2719 bne 32b
2720 li r0,0
2721 stb r0,HSTATE_NAPPING(r13)
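	/*
	 * The lwarx/stwcx. loop above is an atomic and-not; as a C
	 * sketch (illustrative, not the kernel's exact code):
	 *
	 *	atomic_andnot(1 << ptid, &vc->napping_threads);
	 *	local_paca->kvm_hstate.napping = 0;
	 */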
2722
Suresh Warrier37f55d32016-08-19 15:35:46 +10002723 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002724 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002725 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002726 cmpdi r3, 0
2727 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002728
Paul Mackerras19ccb762011-07-23 17:42:46 +10002729 /* see if any other thread is already exiting */
2730 lwz r0,VCORE_ENTRY_EXIT(r5)
2731 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002732 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002733
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002734 b kvmppc_cede_reentry /* if not, go back to the guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002735
2736 /* case where the vcpu was already prodded when it ceded */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002737kvm_cede_prodded:
2738 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002739 stb r0,VCPU_PRODDED(r3)
2740 sync /* order testing prodded vs. clearing ceded */
2741 stb r0,VCPU_CEDED(r3)
2742 li r3,H_SUCCESS
2743 blr
2744
2745 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002746kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002747 ld r9, HSTATE_KVM_VCPU(r13)
2748 b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002749
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002750 /* Try to handle a machine check in real mode */
2751machine_check_realmode:
2752 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002753 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002754 nop
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002755 ld r9, HSTATE_KVM_VCPU(r13)
2756 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302757 /*
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302758 * For a guest that is FWNMI-capable, deliver all MCE errors
2759 * (handled or unhandled) by exiting the guest with the KVM_EXIT_NMI
2760 * exit reason. This new approach injects machine check errors into
2761 * the guest address space, with additional information in the form
2762 * of an RTAS event, enabling the guest kernel to handle such
2763 * errors suitably.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302764 *
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302765 * For a guest that is not FWNMI-capable (old QEMU), fall back
2766 * to the old behaviour for backward compatibility:
2767 * deliver unhandled/fatal (e.g. UE) MCE errors to the guest
2768 * through a machine check interrupt (set HSRR0 to 0x200);
2769 * for handled (non-fatal) errors, just resume guest execution
2770 * with the current HSRR0.
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302771 * If we receive a machine check with MSR[RI]=0, deliver it to
2772 * the guest as a machine check, causing the guest to crash.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302773 */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302774 ld r11, VCPU_MSR(r9)
Paul Mackerras1c9e3d52015-11-12 16:43:48 +11002775 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2776 bne mc_cont /* if so, exit to host */
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302777 /* Check if guest is capable of handling NMI exit */
2778 ld r10, VCPU_KVM(r9)
2779 lbz r10, KVM_FWNMI(r10)
2780 cmpdi r10, 1 /* FWNMI capable? */
2781 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
2782
2783 /* if not, fall through for backward compatibility. */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302784 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2785 beq 1f /* Deliver a machine check to guest */
2786 ld r10, VCPU_PC(r9)
2787 cmpdi r3, 0 /* Did we handle MCE ? */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302788 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002789 /* If not, deliver a machine check. SRR0/1 are already set */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +053027901: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Michael Neulinge4e38122014-03-25 10:47:02 +11002791 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053027922: b fast_interrupt_c_return
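/*
 * The dispatch above, as a rough C sketch (helper names are
 * illustrative only):
 *
 *	if (guest_msr & MSR_HV)
 *		exit to host			(mc_cont)
 *	else if (guest is FWNMI-capable)
 *		exit to host as KVM_EXIT_NMI	(mc_cont)
 *	else if (!(guest_msr & MSR_RI) || !handled)
 *		deliver a 0x200 machine check to the guest
 *	else
 *		resume the guest at the current HSRR0
 */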
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002793
Paul Mackerrasde56a942011-06-29 00:21:34 +00002794/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002795 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002796 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002797 * 0 if nothing needs to be done
2798 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11002799 * -1 if there was a guest wakeup (IPI or msgsnd)
Suresh Warriere3c13e52016-08-19 15:35:51 +10002800 * -2 if we handled a PCI passthrough interrupt (returned by
2801 * kvmppc_read_intr only)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002802 *
2803 * Also sets r12 to the interrupt vector for any interrupt that needs
2804 * to be handled now by the host (0x500 for external interrupt), or zero.
Suresh Warrier37f55d32016-08-19 15:35:46 +10002805 * Modifies all volatile registers (since it may call a C function).
2806 * This routine calls kvmppc_read_intr, a C function, if an external
2807 * interrupt is pending.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002808 */
2809kvmppc_check_wake_reason:
2810 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002811BEGIN_FTR_SECTION
2812 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2813FTR_SECTION_ELSE
2814 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2815ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2816 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10002817 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002818 li r3, 0
2819 li r12, 0
2820 cmpwi r6, 6 /* was it the decrementer? */
2821 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002822BEGIN_FTR_SECTION
2823 cmpwi r6, 5 /* privileged doorbell? */
2824 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002825 cmpwi r6, 3 /* hypervisor doorbell? */
2826 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002827END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302828 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2829 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002830 li r3, 1 /* anything else, return 1 */
28310: blr
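/*
 * For reference, the SRR1 wake-reason values tested above are:
 * 8 = external interrupt, 6 = decrementer, 5 = privileged doorbell,
 * 3 = hypervisor doorbell, 0xa = hypervisor maintenance interrupt.
 */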
2832
Paul Mackerras5d00f662014-01-08 21:25:28 +11002833 /* hypervisor doorbell */
28343: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302835
2836 /*
2837 * Clear the doorbell as we will invoke the handler
2838 * explicitly in the guest exit path.
2839 */
2840 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2841 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11002842 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11002843 li r3, 1
Nicholas Piggin2cde3712017-10-10 20:18:28 +10002844BEGIN_FTR_SECTION
2845 PPC_MSGSYNC
2846 lwsync
2847END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11002848 lbz r0, HSTATE_HOST_IPI(r13)
2849 cmpwi r0, 0
2850 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05302851 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11002852 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11002853 blr
2854
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302855 /* Woken up due to Hypervisor maintenance interrupt */
28564: li r12, BOOK3S_INTERRUPT_HMI
2857 li r3, 1
2858 blr
2859
Suresh Warrier37f55d32016-08-19 15:35:46 +10002860 /* external interrupt - create a stack frame so we can call C */
28617: mflr r0
2862 std r0, PPC_LR_STKOFF(r1)
2863 stdu r1, -PPC_MIN_STKFRM(r1)
2864 bl kvmppc_read_intr
2865 nop
2866 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10002867 cmpdi r3, 1
2868 ble 1f
2869
2870 /*
2871 * A return code of 2 means a PCI passthrough interrupt, but
2872 * we need to return to the host to complete handling the
2873 * interrupt. The guest exit code expects the trap reason
2874 * in r12.
2875 */
2876 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
28771:
Suresh Warrier37f55d32016-08-19 15:35:46 +10002878 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2879 addi r1, r1, PPC_MIN_STKFRM
2880 mtlr r0
2881 blr
Paul Mackerrasde56a942011-06-29 00:21:34 +00002882
2883/*
2884 * Save away FP, VMX and VSX registers.
2885 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002886 * N.B. r30 and r31 (normally non-volatile) are clobbered by this
2887 * function, thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002888 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002889kvmppc_save_fp:
2890 mflr r30
2891 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00002892 mfmsr r5
2893 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00002894#ifdef CONFIG_ALTIVEC
2895BEGIN_FTR_SECTION
2896 oris r8,r8,MSR_VEC@h
2897END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2898#endif
2899#ifdef CONFIG_VSX
2900BEGIN_FTR_SECTION
2901 oris r8,r8,MSR_VSX@h
2902END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2903#endif
2904 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002905 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002906 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002907#ifdef CONFIG_ALTIVEC
2908BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002909 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002910 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002911END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2912#endif
2913 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11002914 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11002915 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00002916 blr
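/*
 * In outline (a sketch, not the kernel's C API):
 *
 *	msr |= MSR_FP | MSR_VEC | MSR_VSX;	enable FP/VMX/VSX
 *	store_fp_state(&vcpu->arch.fp);
 *	store_vr_state(&vcpu->arch.vr);
 *	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 */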
2917
2918/*
2919 * Load up FP, VMX and VSX registers
2920 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002921 * N.B. r30 and r31 (normally non-volatile) are clobbered by this
2922 * function, thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002923 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002924kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002925 mflr r30
2926 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002927 mfmsr r9
2928 ori r8,r9,MSR_FP
2929#ifdef CONFIG_ALTIVEC
2930BEGIN_FTR_SECTION
2931 oris r8,r8,MSR_VEC@h
2932END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2933#endif
2934#ifdef CONFIG_VSX
2935BEGIN_FTR_SECTION
2936 oris r8,r8,MSR_VSX@h
2937END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2938#endif
2939 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002940 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002941 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002942#ifdef CONFIG_ALTIVEC
2943BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002944 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002945 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002946END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2947#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11002948 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002949 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002950 mtlr r30
2951 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002952 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10002953
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002954#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2955/*
2956 * Save transactional state and TM-related registers.
2957 * Called with r9 pointing to the vcpu struct.
2958 * This can modify all checkpointed registers, but
2959 * restores r1, r2 and r9 (vcpu pointer) before exit.
2960 */
2961kvmppc_save_tm:
2962 mflr r0
2963 std r0, PPC_LR_STKOFF(r1)
2964
2965 /* Turn on TM. */
2966 mfmsr r8
2967 li r0, 1
2968 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
2969 mtmsrd r8
2970
2971 ld r5, VCPU_MSR(r9)
2972 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2973 beq 1f /* TM not active in guest. */
2974
2975 std r1, HSTATE_HOST_R1(r13)
2976 li r3, TM_CAUSE_KVM_RESCHED
2977
2978 /* Clear the MSR RI since r1 and r13 are going to be foobar. */
2979 li r5, 0
2980 mtmsrd r5, 1
2981
2982 /* All GPRs are volatile at this point. */
2983 TRECLAIM(R3)
2984
2985 /* Temporarily store r13 and r9 so we have some regs to play with */
2986 SET_SCRATCH0(r13)
2987 GET_PACA(r13)
2988 std r9, PACATMSCRATCH(r13)
2989 ld r9, HSTATE_KVM_VCPU(r13)
2990
2991 /* Get a few more GPRs free. */
2992 std r29, VCPU_GPRS_TM(29)(r9)
2993 std r30, VCPU_GPRS_TM(30)(r9)
2994 std r31, VCPU_GPRS_TM(31)(r9)
2995
2996 /* Save away PPR and DSCR soon so we don't run with user values. */
2997 mfspr r31, SPRN_PPR
2998 HMT_MEDIUM
2999 mfspr r30, SPRN_DSCR
3000 ld r29, HSTATE_DSCR(r13)
3001 mtspr SPRN_DSCR, r29
3002
3003 /* Save all but r9, r13 & r29-r31 */
3004 reg = 0
3005 .rept 29
3006 .if (reg != 9) && (reg != 13)
3007 std reg, VCPU_GPRS_TM(reg)(r9)
3008 .endif
3009 reg = reg + 1
3010 .endr
3011 /* ... now save r13 */
3012 GET_SCRATCH0(r4)
3013 std r4, VCPU_GPRS_TM(13)(r9)
3014 /* ... and save r9 */
3015 ld r4, PACATMSCRATCH(r13)
3016 std r4, VCPU_GPRS_TM(9)(r9)
3017
3018 /* Reload stack pointer and TOC. */
3019 ld r1, HSTATE_HOST_R1(r13)
3020 ld r2, PACATOC(r13)
3021
3022 /* Set MSR RI now we have r1 and r13 back. */
3023 li r5, MSR_RI
3024 mtmsrd r5, 1
3025
3026 /* Save away checkpointed SPRs. */
3027 std r31, VCPU_PPR_TM(r9)
3028 std r30, VCPU_DSCR_TM(r9)
3029 mflr r5
3030 mfcr r6
3031 mfctr r7
3032 mfspr r8, SPRN_AMR
3033 mfspr r10, SPRN_TAR
Paul Mackerras0d808df2016-11-07 15:09:58 +11003034 mfxer r11
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003035 std r5, VCPU_LR_TM(r9)
3036 stw r6, VCPU_CR_TM(r9)
3037 std r7, VCPU_CTR_TM(r9)
3038 std r8, VCPU_AMR_TM(r9)
3039 std r10, VCPU_TAR_TM(r9)
Paul Mackerras0d808df2016-11-07 15:09:58 +11003040 std r11, VCPU_XER_TM(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003041
3042 /* Restore r12 as trap number. */
3043 lwz r12, VCPU_TRAP(r9)
3044
3045 /* Save FP/VSX. */
3046 addi r3, r9, VCPU_FPRS_TM
3047 bl store_fp_state
3048 addi r3, r9, VCPU_VRS_TM
3049 bl store_vr_state
3050 mfspr r6, SPRN_VRSAVE
3051 stw r6, VCPU_VRSAVE_TM(r9)
30521:
3053 /*
3054 * We need to save these SPRs after the treclaim so that the software
3055 * error code is recorded correctly in the TEXASR. Also the user may
3056 * change these outside of a transaction, so they must always be
3057 * context switched.
3058 */
3059 mfspr r5, SPRN_TFHAR
3060 mfspr r6, SPRN_TFIAR
3061 mfspr r7, SPRN_TEXASR
3062 std r5, VCPU_TFHAR(r9)
3063 std r6, VCPU_TFIAR(r9)
3064 std r7, VCPU_TEXASR(r9)
3065
3066 ld r0, PPC_LR_STKOFF(r1)
3067 mtlr r0
3068 blr
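/*
 * Overall shape of the save path above, as a hedged sketch:
 *
 *	if (guest MSR[TS] shows an active or suspended transaction) {
 *		treclaim;	checkpointed values become live
 *		save GPRs, PPR, DSCR, LR, CR, CTR, AMR, TAR, XER
 *		    and FP/VMX state into the vcpu's _TM fields;
 *	}
 *	always save TFHAR, TFIAR and TEXASR, since the user can
 *	    change them outside a transaction.
 */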
3069
3070/*
3071 * Restore transactional state and TM-related registers.
3072 * Called with r4 pointing to the vcpu struct.
3073 * This potentially modifies all checkpointed registers.
3074 * It restores r1, r2, r4 from the PACA.
3075 */
3076kvmppc_restore_tm:
3077 mflr r0
3078 std r0, PPC_LR_STKOFF(r1)
3079
3080 /* Turn on TM/FP/VSX/VMX so we can restore them. */
3081 mfmsr r5
3082 li r6, MSR_TM >> 32
3083 sldi r6, r6, 32
3084 or r5, r5, r6
3085 ori r5, r5, MSR_FP
3086 oris r5, r5, (MSR_VEC | MSR_VSX)@h
3087 mtmsrd r5
3088
3089 /*
3090 * The user may change these outside of a transaction, so they must
3091 * always be context switched.
3092 */
3093 ld r5, VCPU_TFHAR(r4)
3094 ld r6, VCPU_TFIAR(r4)
3095 ld r7, VCPU_TEXASR(r4)
3096 mtspr SPRN_TFHAR, r5
3097 mtspr SPRN_TFIAR, r6
3098 mtspr SPRN_TEXASR, r7
3099
3100 ld r5, VCPU_MSR(r4)
3101 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
3102 beqlr /* TM not active in guest */
3103 std r1, HSTATE_HOST_R1(r13)
3104
3105 /* Make sure the failure summary is set; otherwise we'll take a
3106 * program check when we trechkpt. It's possible that it was not
3107 * set by a kvmppc_set_one_reg() call, but we shouldn't let that
3108 * crash the host.
3109 */
3110 oris r7, r7, (TEXASR_FS)@h
3111 mtspr SPRN_TEXASR, r7
3112
3113 /*
3114 * We need to load up the checkpointed state for the guest.
3115 * We do this early, as it will blow away any GPRs, VSRs and
3116 * some SPRs.
3117 */
3118
3119 mr r31, r4
3120 addi r3, r31, VCPU_FPRS_TM
3121 bl load_fp_state
3122 addi r3, r31, VCPU_VRS_TM
3123 bl load_vr_state
3124 mr r4, r31
3125 lwz r7, VCPU_VRSAVE_TM(r4)
3126 mtspr SPRN_VRSAVE, r7
3127
3128 ld r5, VCPU_LR_TM(r4)
3129 lwz r6, VCPU_CR_TM(r4)
3130 ld r7, VCPU_CTR_TM(r4)
3131 ld r8, VCPU_AMR_TM(r4)
3132 ld r9, VCPU_TAR_TM(r4)
Paul Mackerras0d808df2016-11-07 15:09:58 +11003133 ld r10, VCPU_XER_TM(r4)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003134 mtlr r5
3135 mtcr r6
3136 mtctr r7
3137 mtspr SPRN_AMR, r8
3138 mtspr SPRN_TAR, r9
Paul Mackerras0d808df2016-11-07 15:09:58 +11003139 mtxer r10
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003140
3141 /*
3142 * Load up PPR and DSCR values but don't put them in the actual SPRs
3143 * till the last moment to avoid running with userspace PPR and DSCR for
3144 * too long.
3145 */
3146 ld r29, VCPU_DSCR_TM(r4)
3147 ld r30, VCPU_PPR_TM(r4)
3148
3149 std r2, PACATMSCRATCH(r13) /* Save TOC */
3150
3151 /* Clear the MSR RI since r1 and r13 are going to be foobar. */
3152 li r5, 0
3153 mtmsrd r5, 1
3154
3155 /* Load GPRs r0-r28 */
3156 reg = 0
3157 .rept 29
3158 ld reg, VCPU_GPRS_TM(reg)(r31)
3159 reg = reg + 1
3160 .endr
3161
3162 mtspr SPRN_DSCR, r29
3163 mtspr SPRN_PPR, r30
3164
3165 /* Load final GPRs */
3166 ld 29, VCPU_GPRS_TM(29)(r31)
3167 ld 30, VCPU_GPRS_TM(30)(r31)
3168 ld 31, VCPU_GPRS_TM(31)(r31)
3169
3170 /* TM checkpointed state is now setup. All GPRs are now volatile. */
3171 TRECHKPT
3172
3173 /* Now let's get back the state we need. */
3174 HMT_MEDIUM
3175 GET_PACA(r13)
3176 ld r29, HSTATE_DSCR(r13)
3177 mtspr SPRN_DSCR, r29
3178 ld r4, HSTATE_KVM_VCPU(r13)
3179 ld r1, HSTATE_HOST_R1(r13)
3180 ld r2, PACATMSCRATCH(r13)
3181
3182 /* Set the MSR RI since we have our registers back. */
3183 li r5, MSR_RI
3184 mtmsrd r5, 1
3185
3186 ld r0, PPC_LR_STKOFF(r1)
3187 mtlr r0
3188 blr
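/*
 * The restore path mirrors the save path: TFHAR, TFIAR and TEXASR
 * are always restored; the checkpointed GPR/SPR/FP/VMX state is
 * reloaded and trechkpt. executed only if the guest MSR shows a
 * transactional or suspended state, after forcing TEXASR[FS] so
 * that trechkpt. cannot cause a program check.
 */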
3189#endif
3190
Paul Mackerras44a3add2013-10-04 21:45:04 +10003191/*
3192 * We come here if we get any exception or interrupt while
3193 * executing host real mode code in guest MMU context.
Paul Mackerras857b99e2017-09-01 16:17:27 +10003194 * r12 is (CR << 32) | vector
3195 * r13 points to our PACA
3196 * the original r12 is saved in HSTATE_SCRATCH0(r13)
3197 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3198 * r9 is saved in HSTATE_SCRATCH2(r13)
3199 * r13 is saved in HSPRG1
3200 * cfar is saved in HSTATE_CFAR(r13)
3201 * ppr is saved in HSTATE_PPR(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10003202 */
3203kvmppc_bad_host_intr:
Paul Mackerras857b99e2017-09-01 16:17:27 +10003204 /*
3205 * Switch to the emergency stack, but start half-way down in
3206 * case we were already on it.
3207 */
3208 mr r9, r1
3209 std r1, PACAR1(r13)
3210 ld r1, PACAEMERGSP(r13)
3211 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3212 std r9, 0(r1)
3213 std r0, GPR0(r1)
3214 std r9, GPR1(r1)
3215 std r2, GPR2(r1)
3216 SAVE_4GPRS(3, r1)
3217 SAVE_2GPRS(7, r1)
3218 srdi r0, r12, 32
3219 clrldi r12, r12, 32
3220 std r0, _CCR(r1)
3221 std r12, _TRAP(r1)
3222 andi. r0, r12, 2
3223 beq 1f
3224 mfspr r3, SPRN_HSRR0
3225 mfspr r4, SPRN_HSRR1
3226 mfspr r5, SPRN_HDAR
3227 mfspr r6, SPRN_HDSISR
3228 b 2f
32291: mfspr r3, SPRN_SRR0
3230 mfspr r4, SPRN_SRR1
3231 mfspr r5, SPRN_DAR
3232 mfspr r6, SPRN_DSISR
32332: std r3, _NIP(r1)
3234 std r4, _MSR(r1)
3235 std r5, _DAR(r1)
3236 std r6, _DSISR(r1)
3237 ld r9, HSTATE_SCRATCH2(r13)
3238 ld r12, HSTATE_SCRATCH0(r13)
3239 GET_SCRATCH0(r0)
3240 SAVE_4GPRS(9, r1)
3241 std r0, GPR13(r1)
3242 SAVE_NVGPRS(r1)
3243 ld r5, HSTATE_CFAR(r13)
3244 std r5, ORIG_GPR3(r1)
3245 mflr r3
3246#ifdef CONFIG_RELOCATABLE
3247 ld r4, HSTATE_SCRATCH1(r13)
3248#else
3249 mfctr r4
3250#endif
3251 mfxer r5
3252 lbz r6, PACASOFTIRQEN(r13)
3253 std r3, _LINK(r1)
3254 std r4, _CTR(r1)
3255 std r5, _XER(r1)
3256 std r6, SOFTE(r1)
3257 ld r2, PACATOC(r13)
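	/*
	 * 0x7265677368657265 is ASCII "regshere", presumably an
	 * eye-catcher marking the register save area in stack dumps.
	 */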
3258 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3259 std r3, STACK_FRAME_OVERHEAD-16(r1)
3260
3261 /*
3262 * On POWER9 do a minimal restore of the MMU and call C code,
3263 * which will print a message and panic.
3264 * XXX On POWER7 and POWER8, we just spin here since we don't
3265 * know what the other threads are doing (and we don't want to
3266 * coordinate with them) - but at least we now have register state
3267 * in memory that we might be able to look at from another CPU.
3268 */
3269BEGIN_FTR_SECTION
Paul Mackerras44a3add2013-10-04 21:45:04 +10003270 b .
Paul Mackerras857b99e2017-09-01 16:17:27 +10003271END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3272 ld r9, HSTATE_KVM_VCPU(r13)
3273 ld r10, VCPU_KVM(r9)
3274
3275 li r0, 0
3276 mtspr SPRN_AMR, r0
3277 mtspr SPRN_IAMR, r0
3278 mtspr SPRN_CIABR, r0
3279 mtspr SPRN_DAWRX, r0
3280
3281 /* Flush the ERAT on radix P9 DD1 guest exit */
3282BEGIN_FTR_SECTION
3283 PPC_INVALIDATE_ERAT
3284END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
3285
3286BEGIN_MMU_FTR_SECTION
3287 b 4f
3288END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3289
3290 slbmte r0, r0
3291 slbia
3292 ptesync
3293 ld r8, PACA_SLBSHADOWPTR(r13)
3294 .rept SLB_NUM_BOLTED
3295 li r3, SLBSHADOW_SAVEAREA
3296 LDX_BE r5, r8, r3
3297 addi r3, r3, 8
3298 LDX_BE r6, r8, r3
3299 andis. r7, r5, SLB_ESID_V@h
3300 beq 3f
3301 slbmte r6, r5
33023: addi r8, r8, 16
3303 .endr
3304
33054: lwz r7, KVM_HOST_LPID(r10)
3306 mtspr SPRN_LPID, r7
3307 mtspr SPRN_PID, r0
3308 ld r8, KVM_HOST_LPCR(r10)
3309 mtspr SPRN_LPCR, r8
3310 isync
3311 li r0, KVM_GUEST_MODE_NONE
3312 stb r0, HSTATE_IN_GUEST(r13)
3313
3314 /*
3315 * Turn on the MMU and jump to C code
3316 */
3317 bcl 20, 31, .+4
33185: mflr r3
3319 addi r3, r3, 9f - 5b
3320 ld r4, PACAKMSR(r13)
3321 mtspr SPRN_SRR0, r3
3322 mtspr SPRN_SRR1, r4
3323 rfid
33249: addi r3, r1, STACK_FRAME_OVERHEAD
3325 bl kvmppc_bad_interrupt
3326 b 9b
Michael Neulinge4e38122014-03-25 10:47:02 +11003327
3328/*
3329 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3330 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3331 * r11 has the guest MSR value (in/out)
3332 * r9 has a vcpu pointer (in)
3333 * r0 is used as a scratch register
3334 */
3335kvmppc_msr_interrupt:
3336 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3337 cmpwi r0, 2 /* Check if we are in transactional state.. */
3338 ld r11, VCPU_INTR_MSR(r9)
3339 bne 1f
3340 /* ... if transactional, change to suspended */
3341 li r0, 1
33421: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3343 blr
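/*
 * Equivalent logic as a C sketch (names illustrative):
 *
 *	ts = (old_msr >> MSR_TS_S_LG) & 3;	2 = transactional,
 *						1 = suspended
 *	if (ts == 2)
 *		ts = 1;		interrupt delivery moves T to S
 *	new_msr = vcpu->arch.intr_msr | ((u64)ts << MSR_TS_S_LG);
 */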
Paul Mackerras9bc01a92014-05-26 19:48:40 +10003344
3345/*
3346 * This works around a hardware bug on POWER8E processors, where
3347 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3348 * performance monitor interrupt. Instead, when we need to have
3349 * an interrupt pending, we have to arrange for a counter to overflow.
3350 */
3351kvmppc_fix_pmao:
3352 li r3, 0
3353 mtspr SPRN_MMCR2, r3
3354 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3355 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3356 mtspr SPRN_MMCR0, r3
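	/*
	 * A PMC raises an alert when its most-significant bit becomes
	 * set, so loading 0x7fffffff means the very next event rolls
	 * PMC6 negative and generates the interrupt.
	 */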
3357 lis r3, 0x7fff
3358 ori r3, r3, 0xffff
3359 mtspr SPRN_PMC6, r3
3360 isync
3361 blr
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003362
3363#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3364/*
3365 * Start timing an activity
3366 * r3 = pointer to time accumulation struct, r4 = vcpu
3367 */
3368kvmhv_start_timing:
3369 ld r5, HSTATE_KVM_VCORE(r13)
3370 lbz r6, VCORE_IN_GUEST(r5)
3371 cmpwi r6, 0
3372 beq 5f /* if in guest, need to */
3373 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
33745: mftb r5
3375 subf r5, r6, r5
3376 std r3, VCPU_CUR_ACTIVITY(r4)
3377 std r5, VCPU_ACTIVITY_START(r4)
3378 blr
3379
3380/*
3381 * Accumulate time to one activity and start another.
3382 * r3 = pointer to new time accumulation struct, r4 = vcpu
3383 */
3384kvmhv_accumulate_time:
3385 ld r5, HSTATE_KVM_VCORE(r13)
3386 lbz r8, VCORE_IN_GUEST(r5)
3387 cmpwi r8, 0
3388 beq 4f /* if in guest, need to */
3389 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
33904: ld r5, VCPU_CUR_ACTIVITY(r4)
3391 ld r6, VCPU_ACTIVITY_START(r4)
3392 std r3, VCPU_CUR_ACTIVITY(r4)
3393 mftb r7
3394 subf r7, r8, r7
3395 std r7, VCPU_ACTIVITY_START(r4)
3396 cmpdi r5, 0
3397 beqlr
3398 subf r3, r6, r7
3399 ld r8, TAS_SEQCOUNT(r5)
3400 cmpdi r8, 0
3401 addi r8, r8, 1
3402 std r8, TAS_SEQCOUNT(r5)
3403 lwsync
3404 ld r7, TAS_TOTAL(r5)
3405 add r7, r7, r3
3406 std r7, TAS_TOTAL(r5)
3407 ld r6, TAS_MIN(r5)
3408 ld r7, TAS_MAX(r5)
3409 beq 3f
3410 cmpd r3, r6
3411 bge 1f
34123: std r3, TAS_MIN(r5)
34131: cmpd r3, r7
3414 ble 2f
3415 std r3, TAS_MAX(r5)
34162: lwsync
3417 addi r8, r8, 1
3418 std r8, TAS_SEQCOUNT(r5)
3419 blr
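/*
 * The TAS_SEQCOUNT updates above are the writer side of a seqlock,
 * roughly (a sketch; names illustrative):
 *
 *	acc->seqcount++;	now odd: readers will retry
 *	lwsync;
 *	acc->total += delta; update acc->min and acc->max;
 *	lwsync;
 *	acc->seqcount++;	even again: state is consistent
 */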
3420#endif