/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

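/*
 * Note: GET_SHADOW_VCPU has to work with the MMU off. On Book3S_64 the
 * shadow vcpu state lives in the PACA, which r13 already points to, so a
 * simple register move suffices. On Book3S_32 it is reached through
 * current's thread_struct, and the pointers are converted with tophys()
 * because we run in real mode.
 */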
#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)	\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST
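/*
 * Note: with USE_QUICK_LAST_INST defined, the exit path below briefly
 * re-enables data translation and loads the faulting instruction straight
 * from the guest's PC, relying on the guest's virtual mapping still being
 * in place. The "disable for nested KVM" hint above presumably exists
 * because that assumption does not hold when running nested.
 */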


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

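/*
 * The subarch files included above provide the LOAD_GUEST_SEGMENTS and
 * LOAD_HOST_SEGMENTS macros used below: SLB switching on Book3S_64,
 * segment register switching on Book3S_32.
 */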
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R4 = guest shadow MSR
	 * R5 = normal host MSR
	 * R6 = current host MSR (EE, IR, DR off)
	 * LR = highmem guest exit code
	 * all other volatile GPRS = free
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save guest exit handler address and MSR */
	mflr	r0
	PPC_STL	r0, HSTATE_VMHANDLER(r3)
	PPC_STL	r5, HSTATE_HOST_MSR(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Save host FSCR */
	mfspr	r8, SPRN_FSCR
	std	r8, HSTATE_HOST_FSCR(r13)
	/* Set FSCR during guest execution */
	ld	r9, SVCPU_SHADOW_FSCR(r13)
	mtspr	SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
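	/*
	 * Note: FSCR (the Facility Status and Control Register) only
	 * exists on ISA 2.07 (POWER8) and later, hence the
	 * CPU_FTR_ARCH_207S feature section around the swap above.
	 */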

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
	lbz	r0, HSTATE_RESTORE_HID5(r3)
	cmpwi	r0, 0
	beq	no_dcbz32_on

	mfspr	r0, SPRN_HID5
	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Enter guest */

	PPC_LL	r8, SVCPU_CTR(r3)
	PPC_LL	r9, SVCPU_LR(r3)
	lwz	r10, SVCPU_CR(r3)
	PPC_LL	r11, SVCPU_XER(r3)

	mtctr	r8
	mtlr	r9
	mtcr	r10
	mtxer	r11

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	/* First clear RI in our current MSR value */
	li	r0, MSR_RI
	andc	r6, r6, r0
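	/*
	 * SRR0/SRR1 are about to hold guest values, so an interrupt
	 * taken from here on could not be recovered from; clearing
	 * MSR_RI marks this window as non-recoverable.
	 */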

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	MTMSR_EERI(r6)
	mtsrr0	r9
	mtsrr1	r4

	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)

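	/*
	 * RFI_TO_GUEST is rfid plus, where enabled, the L1D flush
	 * mitigation sequence; it atomically installs SRR1 as the guest
	 * MSR and resumes at SRR0, the guest PC.
	 */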
	RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
	/* 64-bit entry. Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = (guest CR << 32) | exit handler id
	 * R13             = PACA
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
	 */
#ifdef CONFIG_PPC64
	/* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
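	/*
	 * On relocatable kernels the interrupt entry code clobbers CTR
	 * to reach this handler, so the guest CTR was stashed in
	 * HSTATE.SCRATCH1 (see the register usage above); restore it
	 * here, bouncing r9 through HSTATE_SCRATCH2.
	 */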
	std	r9, HSTATE_SCRATCH2(r13)
	ld	r9, HSTATE_SCRATCH1(r13)
	mtctr	r9
	ld	r9, HSTATE_SCRATCH2(r13)
#endif
	rotldi	r12, r12, 32		   /* Flip R12 halves for stw */
	stw	r12, HSTATE_SCRATCH1(r13)  /* CR is now in the low half */
	srdi	r12, r12, 32		   /* shift trap into low half */
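	/*
	 * Worked example: with guest CR = 0x80000001 and trap = 0x300,
	 * r12 arrives as 0x80000001_00000300; rotldi makes it
	 * 0x00000300_80000001, so the stw saves the CR and the srdi
	 * leaves r12 = 0x300, the exit handler id.
	 */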
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0   = guest R13
	 * R12             = exit handler id
	 * R13             = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 */

	/* Save registers */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
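	/*
	 * On HV-capable CPUs, interrupts delivered via HSRR0/1 have (by
	 * kernel convention) bit 0x2 set in the exit number; test it
	 * here and keep the result in cr1, which the exit path below
	 * reuses to decide between SRR and HSRR on the way out.
	 */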
	andi.	r0, r12, 0x2
	cmpwi	cr1, r0, 0
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	PPC_STL	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * In order to easily fetch the instruction we got the #vmexit
	 * at, we exploit the fact that the virtual layout is still the
	 * same here, so we can just load from the guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_SYSCALL
	beq	ld_last_prev_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	beq-	ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

	b	no_ld_last_inst

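	/*
	 * A syscall exit leaves the guest PC pointing after the sc
	 * instruction, so back up by one instruction (4 bytes) before
	 * loading it.
	 */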
ld_last_prev_inst:
	addi	r3, r3, -4

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR		/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, SVCPU_LAST_INST(r13)
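	/*
	 * If the lwz above faulted (or the quick path is compiled out),
	 * r0 still holds KVM_INST_FETCH_FAILED; the emulation code can
	 * recognise that sentinel and fetch the instruction through the
	 * regular translation path instead.
	 */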

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56		/* clear HID5_dcbz32 (0x80) */
	mtspr	SPRN_HID5, r5

no_dcbz32_off:

BEGIN_FTR_SECTION
	/* Save guest FSCR on a FAC_UNAVAIL interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
	bne+	no_fscr_save
	mfspr	r7, SPRN_FSCR
	std	r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
	/* Restore host FSCR */
	ld	r8, HSTATE_HOST_FSCR(r13)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Having set up SRR0/1 with the address where we want
	 * to continue with relocation on (potentially in module
	 * space), we either just go straight there with rfi[d],
	 * or we jump to an interrupt handler if there is an
	 * interrupt to be handled first. In the latter case,
	 * the rfi[d] at the end of the interrupt handler will
	 * get us back to where we want to continue.
	 */

	/* Register usage at this point:
	 *
	 * R1      = host R1
	 * R2      = host R2
	 * R10     = raw exit handler id
	 * R12     = exit handler id
	 * R13     = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.* = guest *
	 *
	 */

	PPC_LL	r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * We don't want to change MSR[TS] bits via rfi here.
	 * The actual TM handling logic will be in the host with
	 * recovered DR/IR bits after HSTATE_VMHANDLER.
	 * Also, MSR_TM can be enabled in HOST_MSR, so rfid would
	 * not suppress the TS change and could raise an exception.
	 * Manually set the MSR to prevent a TS state change here.
	 */
	mfmsr	r7
	rldicl	r7, r7, 64 - MSR_TS_S_LG, 62
	rldimi	r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
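	/*
	 * The two rotates above copy the live MSR[TS] field into the MSR
	 * image we are about to rfi with: rldicl extracts the two TS bits
	 * into the low bits of r7, and rldimi inserts them back into r6
	 * at the TS position, leaving the rest of the host MSR untouched.
	 */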
#endif
	PPC_LL	r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	beq	cr1, 1f
	mtspr	SPRN_HSRR1, r6
	mtspr	SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	/* Restore host msr -> SRR1 */
	mtsrr1	r6
	/* Load highmem handler address */
	mtsrr0	r8

	/* RFI into the highmem handler, or jump to interrupt handler */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beqa	BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beqa	BOOK3S_INTERRUPT_DECREMENTER
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beqa	BOOK3S_INTERRUPT_PERFMON
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL
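	/*
	 * beqa branches with absolute addressing: since the
	 * BOOK3S_INTERRUPT_* values match the architected exception
	 * vector offsets, each beqa above jumps straight to the host's
	 * first-level handler for that interrupt, whose final rfi[d]
	 * then returns to the SRR0 set up above.
	 */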

	RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end: