/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
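/*
 * Worked example of the macro above: with 64-bit GPRs (ULONG_SIZE = 8),
 * VCPU_GPRS_TM(5) expands to VCPU_GPR_TM + 5 * 8, i.e. the byte offset
 * of checkpointed (TM) GPR 5 within the vcpu struct.
 */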

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1		/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
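	/*
	 * The loop above is the standard load-and-reserve /
	 * store-conditional sequence: lwarx reserves the word, we OR in
	 * our thread's bit, and stwcx. succeeds only if no other thread
	 * stored to the word in the meantime; otherwise we retry.
	 */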
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	ld	r6, 0(r6)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
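/*
 * The store/ptesync/load/compare sequence above is the usual recipe for
 * entering nap: the compare of r0 with itself always succeeds, so the
 * bne falls through, but it creates a dependency that forces the reload
 * of HSTATE_SCRATCH0, and hence the ptesync, to complete before the nap
 * instruction executes.
 */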

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
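	/*
	 * The lwarx/stwcx. loop above atomically increments the entry
	 * count in the low byte of vcore->entry_exit_count, but only
	 * while no thread has started exiting (i.e. while the exit
	 * count in the 0xff00 bits is zero, so the word is below 0x100).
	 */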

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
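	/*
	 * The bdnz loop above issues one tlbiel per TLB congruence
	 * class: r7 starts with IS = 0b10 (invalidate the whole set)
	 * and each iteration adds 0x1000 to step the set index,
	 * covering all 128 (POWER7) or 512 (POWER8) sets.  The closing
	 * ptesync makes the invalidations complete before any guest
	 * translations can be used.
	 */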

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
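	/*
	 * TBU40 writes only the upper 40 bits of the timebase; the low
	 * 24 bits keep counting.  If the timebase carried into bit 24
	 * between the mftb and the mtspr (the new low 24 bits compare
	 * below the old ones), that carry was lost, so we add 1 to the
	 * upper-40-bit field (addis with 0x100 is 0x100 << 16 = bit 24)
	 * and write TBU40 again.
	 */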

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon
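	/*
	 * 512 timebase ticks are roughly 1 microsecond at the 512MHz
	 * timebase frequency of these machines; with less than that
	 * left on the hypervisor decrementer it isn't worth entering
	 * the guest, so we bail out above as if HDEC had expired.
	 */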
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
31:
	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
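	/*
	 * The bdnz loop above re-creates the guest SLB with slbmte,
	 * one entry per iteration from the vcpu's slb array; the host
	 * SLB was already emptied on the way in, so only guest
	 * mappings are present from here on.
	 */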
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

BEGIN_FTR_SECTION
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
 BEGIN_FTR_SECTION_NESTED(89)
	isync
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this was
	 * not set by a kvmppc_set_one_reg() call, but we shouldn't let
	 * that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	.load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	.load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr
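	/*
	 * The .rept block above is an assemble-time loop: it emits the
	 * 29 loads "ld r0,..." through "ld r28,...", using the
	 * VCPU_GPRS_TM() macro to compute each offset from r31.  r29,
	 * r30 and r31 are still in use (DSCR/PPR values and the vcpu
	 * pointer), so they are loaded last, below.
	 */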

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 or PPC970 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
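	/*
	 * DEC counts down, so the value programmed is simply the
	 * guest's expiry time minus the current (guest) timebase: DEC
	 * reaches zero, and interrupts, exactly at dec_expires.
	 */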

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
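	/*
	 * The loop above walks every hardware SLB slot with
	 * slbmfee/slbmfev, keeping only entries whose valid bit is set
	 * in the ESID; r5 counts the valid entries and becomes the new
	 * vcpu->arch.slb_max stored just above.
	 */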

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
	mfspr	r4, SPRN_MMCR2
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r4, VCPU_MMCR + 24(r9)
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b
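	/*
	 * The 42/44 loop above walks the napping_threads bitmask one
	 * bit at a time, with r6 starting at thread 0's PACA and
	 * advancing by PACA_SIZE per thread.  For each napping thread
	 * it writes IPI_PRIORITY into that thread's XICS MFRR register
	 * using stbcix (a cache-inhibited store, since we're in real
	 * mode), which raises the IPI that wakes the thread from nap.
	 */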

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
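	/*
	 * In the 15: loop above, the primary compares the exit count
	 * (bits 8-15 of entry_exit_count, extracted by the srwi) with
	 * the entry count (bits 0-7, extracted by the clrldi),
	 * spinning until every thread that entered the guest has
	 * also exited.
	 */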

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
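	/*
	 * The .rept above expands to SLB_NUM_BOLTED copies of the
	 * load/test/slbmte sequence, restoring each bolted host SLB
	 * entry from the SLB shadow buffer; entries whose valid bit
	 * is clear are skipped.
	 */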
1606
Paul Mackerrasde56a942011-06-29 00:21:34 +00001607 /* Unset guest mode */
1608 li r0, KVM_GUEST_MODE_NONE
1609 stb r0, HSTATE_IN_GUEST(r13)
1610
Paul Mackerras218309b2013-09-06 13:23:44 +10001611 ld r0, 112+PPC_LR_STKOFF(r1)
1612 addi r1, r1, 112
1613 mtlr r0
1614 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001615
Paul Mackerras697d3892011-12-12 12:36:37 +00001616/*
1617 * Check whether an HDSI is an HPTE not found fault or something else.
1618 * If it is an HPTE not found fault that is due to the guest accessing
1619 * a page that they have mapped but which we have paged out, then
1620 * we continue on with the guest exit path. In all other cases,
1621 * reflect the HDSI to the guest as a DSI.
1622 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
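/*
 * Dispatch, as an illustrative C sketch. Hcall numbers are multiples
 * of 4 and the table holds one 32-bit offset per number, so the hcall
 * number doubles as the byte offset into the table; function names
 * other than the table itself are made up:
 *
 *	if (guest_msr & MSR_PR)			// sc 1 from userspace
 *		return reflect_syscall_to_guest();
 *	nr = vcpu->gpr[3] & ~3UL;
 *	if (nr >= hcall_real_table_end - hcall_real_table)
 *		return exit_to_host();
 *	s32 off = *(s32 *)((char *)hcall_real_table + nr);
 *	if (!off)
 *		return exit_to_host();		// no real-mode handler
 *	ret = call_at((char *)hcall_real_table + off, vcpu, vcpu->gpr[4]);
 *	if (ret == H_TOO_HARD)
 *		return exit_to_host();		// punt to kernel/QEMU
 *	vcpu->gpr[3] = ret;
 *	return fast_guest_return();
 */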
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's been punted back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	.kvmppc_h_get_tce - hcall_real_table
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	.kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:
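/*
 * Note: entries are 32-bit offsets from hcall_real_table rather than
 * absolute addresses, which keeps the table at one word per hcall
 * number and position-independent; the dispatcher above adds the
 * table's runtime address back in before branching via CTR.
 */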

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr
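	/*
	 * P8 has no DABR, so the rlwimi pair above repositions the
	 * DABR's low-order control bits (the read/write enables and
	 * the breakpoint-translation bit) into their DAWRX
	 * counterparts, and clrrdi strips them from the address that
	 * goes into DAWR. This is a best-effort emulation for
	 * compat-mode guests, not a full DAWR programming interface.
	 */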

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */
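	/*
	 * The lwarx/stwcx. loop above is, roughly, the following
	 * illustrative C; the popcount is compared against the count
	 * of threads that entered the guest, held in the low byte of
	 * entry_exit_count:
	 *
	 *	do {
	 *		old = vc->napping_threads;
	 *		new = old | (1 << ptid);
	 *		if (popcount(new) >= threads_in_guest)
	 *			goto kvm_cede_exit;  // we are the last one
	 *	} while (cmpxchg(&vc->napping_threads, old, new) != old);
	 */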

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
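	/*
	 * The store/ptesync/reload of HSTATE_SCRATCH0 plus the
	 * always-equal cmpd above ensure our stores have completed
	 * before the thread enters nap; the bne is never taken
	 * (r0 always equals itself), it exists only to create a
	 * dependency on the reload.
	 */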

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	li	r3, 1
	blr
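/*
 * Decode summary as an illustrative C sketch (reason codes match the
 * cmpwi tests above; the P7 field is only 3 bits wide):
 *
 *	switch (wake_reason) {
 *	case 8:  return kvmppc_read_intr();	// external, r12 = 0x500
 *	case 6:  return 0;			// decrementer
 *	case 5:  return 0;			// privileged doorbell (P8)
 *	case 3:  r12 = BOOK3S_INTERRUPT_H_DOORBELL;
 *		 return 1;			// hypervisor doorbell (P8)
 *	default: return 1;			// let the host handle it
 *	}
 */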

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host; stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver.
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	li	r3, 1
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b
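/*
 * The whole protocol above, as an illustrative C sketch (xics_read*
 * and xics_write* stand in for the cache-inhibited lwzcix/stwcix
 * accesses to the real-mode ICP):
 *
 *	if (local_paca->kvm_hstate.host_ipi)
 *		return 1;			// host needs attention
 *	xirr = xics_read32(icp, XICS_XIRR);	// accepts the interrupt
 *	if (!(xirr & 0xffffff))
 *		return 0;			// nothing pending
 *	if ((xirr & 0xffffff) != XICS_IPI) {
 *		local_paca->kvm_hstate.saved_xirr = xirr;
 *		return 1;			// host ICP driver takes it
 *	}
 *	xics_write8(icp, XICS_MFRR, 0xff);	// clear the IPI
 *	xics_write32(icp, XICS_XIRR, xirr);	// EOI
 *	if (local_paca->kvm_hstate.host_ipi) {	// raced with the host?
 *		xics_write8(icp, XICS_MFRR, IPI_PRIORITY);  // resend
 *		return 1;
 *	}
 *	return -1;				// guest wakeup IPI, handled
 */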

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r3,VCPU_FPRS
	bl	.store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	.store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	addi	r3,r4,VCPU_FPRS
	bl	.load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	.load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
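/*
 * Equivalent logic as an illustrative C sketch: the two-bit TS field
 * is extracted from the old MSR, demoted from transactional (2) to
 * suspended (1) if necessary, and re-inserted into the fresh MSR
 * taken from vcpu->arch.intr_msr (MSR_TS_MASK is assumed to cover
 * the two TS bits):
 *
 *	ts = (old_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 2)		// transactional -> suspended
 *		ts = 1;
 *	new_msr = vcpu->arch.intr_msr;
 *	new_msr = (new_msr & ~MSR_TS_MASK) | ((u64)ts << MSR_TS_S_LG);
 *	return new_msr;
 */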