// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

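/*
 * Stats exported through the kvm debugfs interface: VCPU_STAT entries refer
 * to counters in struct kvm_vcpu_stat, VM_STAT entries to counters in
 * struct kvm_vm_stat (see the VCPU_STAT/VM_STAT macro definitions).
 */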
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("itlb_r", itlb_real_miss_exits),
	VCPU_STAT("itlb_v", itlb_virt_miss_exits),
	VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
	VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("isi", isi_exits),
	VCPU_STAT("dsi", dsi_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("doorbell", dbell_exits),
	VCPU_STAT("guest doorbell", gdbell_exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
	       vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
	       vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, and the host can then save the
 * guest vcpu FP state if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
				      vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
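	/*
	 * Illustrative example: with a watchdog period of 30, wdt_tb is
	 * 1ULL << 33 timebase ticks.  If that TB bit is currently 1 we
	 * first wait wdt_tb ticks for it to clear, plus the remainder of
	 * the current interval until it is set again - at most 2 * wdt_tb
	 * ticks, which is what the calculation below accounts for.
	 */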
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resources are owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr).
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.regs.nip;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.regs.nip);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit_irqoff();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
		goto out;
	}

Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001031 switch (exit_nr) {
1032 case BOOKE_INTERRUPT_MACHINE_CHECK:
Alexander Grafc35c9d82012-02-20 12:21:18 +01001033 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
1034 kvmppc_dump_vcpu(vcpu);
1035 /* For debugging, send invalid exit reason to user space */
1036 run->hw.hardware_exit_reason = ~1ULL << 32;
1037 run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
1038 r = RESUME_HOST;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001039 break;
1040
1041 case BOOKE_INTERRUPT_EXTERNAL:
Hollis Blanchard7b701592008-12-02 15:51:58 -06001042 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
Hollis Blanchard1b6766c2008-11-05 09:36:21 -06001043 r = RESUME_GUEST;
1044 break;
1045
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001046 case BOOKE_INTERRUPT_DECREMENTER:
Hollis Blanchard7b701592008-12-02 15:51:58 -06001047 kvmppc_account_exit(vcpu, DEC_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001048 r = RESUME_GUEST;
1049 break;
1050
Bharat Bhushan6328e592012-06-20 05:56:53 +00001051 case BOOKE_INTERRUPT_WATCHDOG:
1052 r = RESUME_GUEST;
1053 break;
1054
Scott Woodd30f6e42011-12-20 15:34:43 +00001055 case BOOKE_INTERRUPT_DOORBELL:
1056 kvmppc_account_exit(vcpu, DBELL_EXITS);
Scott Woodd30f6e42011-12-20 15:34:43 +00001057 r = RESUME_GUEST;
1058 break;
1059
1060 case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
1061 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1062
1063 /*
1064 * We are here because there is a pending guest interrupt
1065 * which could not be delivered as MSR_CE or MSR_ME was not
1066 * set. Once we break from here we will retry delivery.
1067 */
1068 r = RESUME_GUEST;
1069 break;
1070
1071 case BOOKE_INTERRUPT_GUEST_DBELL:
1072 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1073
1074 /*
1075 * We are here because there is a pending guest interrupt
1076 * which could not be delivered as MSR_EE was not set. Once
1077 * we break from here we will retry delivery.
1078 */
1079 r = RESUME_GUEST;
1080 break;
1081
Alexander Graf95f2e922012-02-20 22:45:12 +01001082 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
1083 r = RESUME_GUEST;
1084 break;
1085
Scott Woodd30f6e42011-12-20 15:34:43 +00001086 case BOOKE_INTERRUPT_HV_PRIV:
Tianjia Zhang8c99d342020-04-27 12:35:11 +08001087 r = emulation_exit(vcpu);
Scott Woodd30f6e42011-12-20 15:34:43 +00001088 break;
1089
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001090 case BOOKE_INTERRUPT_PROGRAM:
Madhavan Srinivasan033aaa12014-09-09 22:37:36 +05301091 if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
1092 (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
1093 /*
1094 * We are here because of an SW breakpoint instr,
1095 * so lets return to host to handle.
1096 */
Tianjia Zhang8c99d342020-04-27 12:35:11 +08001097 r = kvmppc_handle_debug(vcpu);
Madhavan Srinivasan033aaa12014-09-09 22:37:36 +05301098 run->exit_reason = KVM_EXIT_DEBUG;
1099 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1100 break;
1101 }
1102
Scott Woodd30f6e42011-12-20 15:34:43 +00001103 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
Alexander Graf02685972012-02-20 12:33:22 +01001104 /*
1105 * Program traps generated by user-level software must
1106 * be handled by the guest kernel.
1107 *
1108 * In GS mode, hypervisor privileged instructions trap
1109 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
1110 * actual program interrupts, handled by the guest.
1111 */
Liu Yudaf5e272010-02-02 19:44:35 +08001112 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001113 r = RESUME_GUEST;
Hollis Blanchard7b701592008-12-02 15:51:58 -06001114 kvmppc_account_exit(vcpu, USR_PR_INST);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001115 break;
1116 }
1117
Tianjia Zhang8c99d342020-04-27 12:35:11 +08001118 r = emulation_exit(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001119 break;
1120
Christian Ehrhardtde368dc2008-04-29 18:18:23 +02001121 case BOOKE_INTERRUPT_FP_UNAVAIL:
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001122 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001123 kvmppc_account_exit(vcpu, FP_UNAVAIL);
Christian Ehrhardtde368dc2008-04-29 18:18:23 +02001124 r = RESUME_GUEST;
1125 break;
1126
#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.regs.nip);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

1293 case BOOKE_INTERRUPT_ITLB_MISS: {
Simon Guo173c5202018-05-07 14:20:08 +08001294 unsigned long eaddr = vcpu->arch.regs.nip;
Hollis Blanchard89168612008-12-02 15:51:53 -06001295 gpa_t gpaddr;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001296 gfn_t gfn;
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001297 int gtlb_index;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001298
1299 r = RESUME_GUEST;
1300
1301 /* Check the guest TLB. */
Hollis Blanchardfa86b8d2009-01-03 16:23:03 -06001302 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001303 if (gtlb_index < 0) {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001304 /* The guest didn't have a mapping for it. */
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
Hollis Blanchardb52a6382009-01-03 16:23:11 -06001306 kvmppc_mmu_itlb_miss(vcpu);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001307 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001308 break;
1309 }
1310
Hollis Blanchard7b701592008-12-02 15:51:58 -06001311 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001312
Scott Woodf1e89022013-06-06 19:16:31 -05001313 idx = srcu_read_lock(&vcpu->kvm->srcu);
1314
Hollis Blanchardbe8d1ca2009-01-03 16:23:02 -06001315 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
Hollis Blanchard89168612008-12-02 15:51:53 -06001316 gfn = gpaddr >> PAGE_SHIFT;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001317
1318 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1319 /* The guest TLB had a mapping, but the shadow TLB
1320 * didn't. This could be because:
1321 * a) the entry is mapping the host kernel, or
1322 * b) the guest used a large mapping which we're faking.
1323 * Either way, we need to satisfy the fault without
1324 * invoking the guest. */
Hollis Blanchard58a96212009-01-03 16:23:01 -06001325 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001326 } else {
1327 /* Guest mapped and leaped at non-RAM! */
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001328 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001329 }
1330
Scott Woodf1e89022013-06-06 19:16:31 -05001331 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001332 break;
1333 }
1334
Hollis Blanchard6a0ab732008-07-25 13:54:49 -05001335 case BOOKE_INTERRUPT_DEBUG: {
Tianjia Zhang8c99d342020-04-27 12:35:11 +08001336 r = kvmppc_handle_debug(vcpu);
Bharat Bhushance11e482013-07-04 12:27:47 +05301337 if (r == RESUME_HOST)
1338 run->exit_reason = KVM_EXIT_DEBUG;
Hollis Blanchard7b701592008-12-02 15:51:58 -06001339 kvmppc_account_exit(vcpu, DEBUG_EXITS);
Hollis Blanchard6a0ab732008-07-25 13:54:49 -05001340 break;
1341 }
1342
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001343 default:
1344 printk(KERN_EMERG "exit_nr %d\n", exit_nr);
1345 BUG();
1346 }
1347
Mihai Caramanf5250472014-07-23 19:06:22 +03001348out:
Alexander Grafa8e4ef82012-02-16 14:07:37 +00001349 /*
1350 * To avoid clobbering exit_reason, only check for signals if we
1351 * aren't already exiting to userspace for some other reason.
1352 */
Alexander Graf03660ba2012-02-28 12:00:41 +01001353 if (!(r & RESUME_HOST)) {
Alexander Graf7ee78852012-08-13 12:44:41 +02001354 s = kvmppc_prepare_to_enter(vcpu);
Scott Wood6c85f522014-01-09 19:18:40 -06001355 if (s <= 0)
Alexander Graf7ee78852012-08-13 12:44:41 +02001356 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
Scott Wood6c85f522014-01-09 19:18:40 -06001357 else {
1358 /* interrupts now hard-disabled */
Scott Wood5f1c2482013-07-10 17:47:39 -05001359 kvmppc_fix_ee_before_entry();
Mihai Caraman3efc7da2014-08-20 16:36:22 +03001360 kvmppc_load_guest_fp(vcpu);
Mihai Caraman95d80a22014-08-20 16:36:23 +03001361 kvmppc_load_guest_altivec(vcpu);
Alexander Graf03660ba2012-02-28 12:00:41 +01001362 }
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001363 }
1364
1365 return r;
1366}
1367
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001368static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1369{
1370 u32 old_tsr = vcpu->arch.tsr;
1371
1372 vcpu->arch.tsr = new_tsr;
1373
1374 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1375 arm_next_watchdog(vcpu);
1376
1377 update_timer_ints(vcpu);
1378}
1379
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001380int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1381{
1382 /* setup watchdog timer once */
1383 spin_lock_init(&vcpu->arch.wdt_lock);
Kees Cook86cb30e2017-10-17 20:21:24 -07001384 timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001385
Bharat Bhushan2f699a52014-08-13 14:39:44 +05301386 /*
1387 * Clear DBSR.MRR so that a stale most-recent-reset status does not
1388 * raise a guest debug interrupt; the reset event is of host interest only.
1389 */
1390 mtspr(SPRN_DBSR, DBSR_MRR);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001391 return 0;
1392}
1393
1394void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1395{
1396 del_timer_sync(&vcpu->arch.wdt_timer);
1397}
1398
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001399int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1400{
1401 int i;
1402
Christoffer Dall1fc9b762017-12-04 21:35:26 +01001403 vcpu_load(vcpu);
1404
Simon Guo173c5202018-05-07 14:20:08 +08001405 regs->pc = vcpu->arch.regs.nip;
Alexander Graf992b5b22010-01-08 02:58:02 +01001406 regs->cr = kvmppc_get_cr(vcpu);
Simon Guo173c5202018-05-07 14:20:08 +08001407 regs->ctr = vcpu->arch.regs.ctr;
1408 regs->lr = vcpu->arch.regs.link;
Alexander Graf992b5b22010-01-08 02:58:02 +01001409 regs->xer = kvmppc_get_xer(vcpu);
Alexander Graf666e7252010-07-29 14:47:43 +02001410 regs->msr = vcpu->arch.shared->msr;
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301411 regs->srr0 = kvmppc_get_srr0(vcpu);
1412 regs->srr1 = kvmppc_get_srr1(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001413 regs->pid = vcpu->arch.pid;
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301414 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1415 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1416 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1417 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1418 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1419 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1420 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1421 regs->sprg7 = kvmppc_get_sprg7(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001422
1423 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001424 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001425
Christoffer Dall1fc9b762017-12-04 21:35:26 +01001426 vcpu_put(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001427 return 0;
1428}
1429
1430int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1431{
1432 int i;
1433
Christoffer Dall875656f2017-12-04 21:35:27 +01001434 vcpu_load(vcpu);
1435
Simon Guo173c5202018-05-07 14:20:08 +08001436 vcpu->arch.regs.nip = regs->pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001437 kvmppc_set_cr(vcpu, regs->cr);
Simon Guo173c5202018-05-07 14:20:08 +08001438 vcpu->arch.regs.ctr = regs->ctr;
1439 vcpu->arch.regs.link = regs->lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001440 kvmppc_set_xer(vcpu, regs->xer);
Hollis Blanchardb8fd68a2008-11-05 09:36:20 -06001441 kvmppc_set_msr(vcpu, regs->msr);
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301442 kvmppc_set_srr0(vcpu, regs->srr0);
1443 kvmppc_set_srr1(vcpu, regs->srr1);
Scott Wood5ce941e2011-04-27 17:24:21 -05001444 kvmppc_set_pid(vcpu, regs->pid);
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301445 kvmppc_set_sprg0(vcpu, regs->sprg0);
1446 kvmppc_set_sprg1(vcpu, regs->sprg1);
1447 kvmppc_set_sprg2(vcpu, regs->sprg2);
1448 kvmppc_set_sprg3(vcpu, regs->sprg3);
1449 kvmppc_set_sprg4(vcpu, regs->sprg4);
1450 kvmppc_set_sprg5(vcpu, regs->sprg5);
1451 kvmppc_set_sprg6(vcpu, regs->sprg6);
1452 kvmppc_set_sprg7(vcpu, regs->sprg7);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001453
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001454 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1455 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001456
Christoffer Dall875656f2017-12-04 21:35:27 +01001457 vcpu_put(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001458 return 0;
1459}
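/*
 * The two handlers above back the KVM_GET_REGS and KVM_SET_REGS vcpu
 * ioctls. As a rough userspace sketch only (hypothetical helper, vcpu_fd
 * assumed to come from KVM_CREATE_VCPU, error handling omitted):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int set_boot_state(int vcpu_fd, __u64 entry, __u64 dt_addr)
 *	{
 *		struct kvm_regs regs;
 *
 *		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
 *			return -1;
 *		regs.pc = entry;	// guest entry point
 *		regs.gpr[3] = dt_addr;	// e.g. guest-physical device tree address
 *		return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 *	}
 *
 * Note that regs.msr is routed through kvmppc_set_msr() above, so writing
 * it goes through the usual MSR update paths rather than being a plain
 * register store.
 */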
1460
Scott Wood5ce941e2011-04-27 17:24:21 -05001461static void get_sregs_base(struct kvm_vcpu *vcpu,
1462 struct kvm_sregs *sregs)
1463{
1464 u64 tb = get_tb();
1465
1466 sregs->u.e.features |= KVM_SREGS_E_BASE;
1467
1468 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1469 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1470 sregs->u.e.mcsr = vcpu->arch.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301471 sregs->u.e.esr = kvmppc_get_esr(vcpu);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301472 sregs->u.e.dear = kvmppc_get_dar(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001473 sregs->u.e.tsr = vcpu->arch.tsr;
1474 sregs->u.e.tcr = vcpu->arch.tcr;
1475 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1476 sregs->u.e.tb = tb;
1477 sregs->u.e.vrsave = vcpu->arch.vrsave;
1478}
1479
1480static int set_sregs_base(struct kvm_vcpu *vcpu,
1481 struct kvm_sregs *sregs)
1482{
1483 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1484 return 0;
1485
1486 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1487 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1488 vcpu->arch.mcsr = sregs->u.e.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301489 kvmppc_set_esr(vcpu, sregs->u.e.esr);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301490 kvmppc_set_dar(vcpu, sregs->u.e.dear);
Scott Wood5ce941e2011-04-27 17:24:21 -05001491 vcpu->arch.vrsave = sregs->u.e.vrsave;
Scott Wooddfd4d472011-11-17 12:39:59 +00001492 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001493
Scott Wooddfd4d472011-11-17 12:39:59 +00001494 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
Scott Wood5ce941e2011-04-27 17:24:21 -05001495 vcpu->arch.dec = sregs->u.e.dec;
Scott Wooddfd4d472011-11-17 12:39:59 +00001496 kvmppc_emulate_dec(vcpu);
1497 }
Scott Wood5ce941e2011-04-27 17:24:21 -05001498
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001499 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1500 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001501
1502 return 0;
1503}
1504
1505static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1506 struct kvm_sregs *sregs)
1507{
1508 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1509
Scott Wood841741f2011-09-02 17:39:37 -05001510 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001511 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1512 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1513 sregs->u.e.decar = vcpu->arch.decar;
1514 sregs->u.e.ivpr = vcpu->arch.ivpr;
1515}
1516
1517static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1518 struct kvm_sregs *sregs)
1519{
1520 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1521 return 0;
1522
Scott Wood841741f2011-09-02 17:39:37 -05001523 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001524 return -EINVAL;
1525
1526 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1527 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1528 vcpu->arch.decar = sregs->u.e.decar;
1529 vcpu->arch.ivpr = sregs->u.e.ivpr;
1530
1531 return 0;
1532}
1533
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301534int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
Scott Wood5ce941e2011-04-27 17:24:21 -05001535{
1536 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1537
1538 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1539 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1540 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1541 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1542 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1543 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1544 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1545 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1546 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1547 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1548 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1549 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1550 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1551 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1552 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1553 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301554 return 0;
Scott Wood5ce941e2011-04-27 17:24:21 -05001555}
1556
1557int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1558{
1559 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1560 return 0;
1561
1562 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1563 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1564 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1565 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1566 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1567 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1568 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1569 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1570 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1571 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1572 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1573 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1574 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1575 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1576 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1577 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1578
1579 return 0;
1580}
1581
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001582int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1583 struct kvm_sregs *sregs)
1584{
Christoffer Dallbcdec412017-12-04 21:35:28 +01001585 int ret;
1586
1587 vcpu_load(vcpu);
1588
Scott Wood5ce941e2011-04-27 17:24:21 -05001589 sregs->pvr = vcpu->arch.pvr;
1590
1591 get_sregs_base(vcpu, sregs);
1592 get_sregs_arch206(vcpu, sregs);
Christoffer Dallbcdec412017-12-04 21:35:28 +01001593 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1594
1595 vcpu_put(vcpu);
1596 return ret;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001597}
1598
1599int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1600 struct kvm_sregs *sregs)
1601{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001602 int ret = -EINVAL;
Scott Wood5ce941e2011-04-27 17:24:21 -05001603
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001604 vcpu_load(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001605 if (vcpu->arch.pvr != sregs->pvr)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001606 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001607
1608 ret = set_sregs_base(vcpu, sregs);
1609 if (ret < 0)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001610 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001611
1612 ret = set_sregs_arch206(vcpu, sregs);
1613 if (ret < 0)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001614 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001615
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001616 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1617
1618out:
1619 vcpu_put(vcpu);
1620 return ret;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001621}
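/*
 * kvm_arch_vcpu_ioctl_get_sregs()/..._set_sregs() back the KVM_GET_SREGS
 * and KVM_SET_SREGS ioctls. Only the register groups flagged in
 * u.e.features are applied on the set path, and the timer state (DEC, TSR)
 * is touched only when the corresponding KVM_SREGS_E_UPDATE_* bit is set
 * in u.e.update_special. A rough userspace sketch (hypothetical helper,
 * error handling omitted) of rewriting TSR, e.g. after restoring a
 * checkpoint:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int restore_tsr(int vcpu_fd, __u32 tsr)
 *	{
 *		struct kvm_sregs sregs;
 *
 *		if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
 *			return -1;
 *		sregs.u.e.tsr = tsr;
 *		sregs.u.e.update_special |= KVM_SREGS_E_UPDATE_TSR;
 *		return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 *	}
 */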
1622
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001623int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1624 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001625{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001626 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001627
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001628 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001629 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001630 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001631 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301632 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001633 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301634 break;
1635#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1636 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001637 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301638 break;
1639 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001640 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301641 break;
1642#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001643 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001644 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301645 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001646 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001647 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301648 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001649 case KVM_REG_PPC_EPR: {
Bharat Bhushan34f754b2014-07-17 17:01:40 +05301650 u32 epr = kvmppc_get_epr(vcpu);
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001651 *val = get_reg_val(id, epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001652 break;
1653 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001654#if defined(CONFIG_64BIT)
1655 case KVM_REG_PPC_EPCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001656 *val = get_reg_val(id, vcpu->arch.epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001657 break;
1658#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001659 case KVM_REG_PPC_TCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001660 *val = get_reg_val(id, vcpu->arch.tcr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001661 break;
1662 case KVM_REG_PPC_TSR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001663 *val = get_reg_val(id, vcpu->arch.tsr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001664 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001665 case KVM_REG_PPC_DEBUG_INST:
Madhavan Srinivasan033aaa12014-09-09 22:37:36 +05301666 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001667 break;
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001668 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001669 *val = get_reg_val(id, vcpu->arch.vrsave);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001670 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001671 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001672 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001673 break;
1674 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001675
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001676 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001677}
1678
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001679int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1680 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001681{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001682 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001683
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001684 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001685 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001686 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001687 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301688 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001689 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301690 break;
1691#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1692 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001693 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301694 break;
1695 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001696 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301697 break;
1698#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001699 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001700 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301701 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001702 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001703 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301704 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001705 case KVM_REG_PPC_EPR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001706 u32 new_epr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001707 kvmppc_set_epr(vcpu, new_epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001708 break;
1709 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001710#if defined(CONFIG_64BIT)
1711 case KVM_REG_PPC_EPCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001712 u32 new_epcr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001713 kvmppc_set_epcr(vcpu, new_epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001714 break;
1715 }
1716#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001717 case KVM_REG_PPC_OR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001718 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001719 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1720 break;
1721 }
1722 case KVM_REG_PPC_CLEAR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001723 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001724 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1725 break;
1726 }
1727 case KVM_REG_PPC_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001728 u32 tsr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001729 kvmppc_set_tsr(vcpu, tsr);
1730 break;
1731 }
1732 case KVM_REG_PPC_TCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001733 u32 tcr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001734 kvmppc_set_tcr(vcpu, tcr);
1735 break;
1736 }
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001737 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001738 vcpu->arch.vrsave = set_reg_val(id, *val);
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001739 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001740 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001741 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001742 break;
1743 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001744
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001745 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001746}
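/*
 * kvmppc_get_one_reg()/kvmppc_set_one_reg() sit behind the generic
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, which transfer a single register
 * identified by a KVM_REG_PPC_* id. A minimal sketch of the userspace side
 * (hypothetical helper, error handling omitted); note that
 * KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR (handled only on the set
 * path above) or/clear individual TSR bits, while KVM_REG_PPC_TSR replaces
 * the whole register:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int one_reg_set_u32(int vcpu_fd, __u64 id, __u32 val)
 *	{
 *		struct kvm_one_reg reg = {
 *			.id   = id,
 *			.addr = (__u64)(unsigned long)&val,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *	}
 *
 *	// e.g. one_reg_set_u32(vcpu_fd, KVM_REG_PPC_CLEAR_TSR, tsr_bits);
 */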
1747
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001748int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1749{
1750 return -ENOTSUPP;
1751}
1752
1753int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1754{
1755 return -ENOTSUPP;
1756}
1757
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001758int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1759 struct kvm_translation *tr)
1760{
Avi Kivity98001d82010-05-13 11:05:49 +03001761 int r;
1762
Christoffer Dall1da5b612017-12-04 21:35:32 +01001763 vcpu_load(vcpu);
Avi Kivity98001d82010-05-13 11:05:49 +03001764 r = kvmppc_core_vcpu_translate(vcpu, tr);
Christoffer Dall1da5b612017-12-04 21:35:32 +01001765 vcpu_put(vcpu);
Avi Kivity98001d82010-05-13 11:05:49 +03001766 return r;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001767}
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001768
Sean Christopherson0dff0842020-02-18 13:07:29 -08001769void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1770{
1771
1772}
1773
Alexander Graf4e755752009-10-30 05:47:01 +00001774int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1775{
1776 return -ENOTSUPP;
1777}
1778
Sean Christophersone96c81e2020-02-18 13:07:27 -08001779void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001780{
1781}
1782
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001783int kvmppc_core_prepare_memory_region(struct kvm *kvm,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001784 struct kvm_memory_slot *memslot,
Sean Christopherson82307e62020-02-18 13:07:18 -08001785 const struct kvm_userspace_memory_region *mem,
1786 enum kvm_mr_change change)
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001787{
1788 return 0;
1789}
1790
1791void kvmppc_core_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02001792 const struct kvm_userspace_memory_region *mem,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02001793 const struct kvm_memory_slot *old,
Bharata B Raof032b732018-12-12 15:15:30 +11001794 const struct kvm_memory_slot *new,
1795 enum kvm_mr_change change)
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001796{
1797}
1798
1799void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001800{
1801}
1802
Mihai Caraman38f98822012-10-11 06:13:27 +00001803void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1804{
1805#if defined(CONFIG_64BIT)
1806 vcpu->arch.epcr = new_epcr;
1807#ifdef CONFIG_KVM_BOOKE_HV
1808 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1809 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1810 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1811#endif
1812#endif
1813}
1814
Scott Wooddfd4d472011-11-17 12:39:59 +00001815void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1816{
1817 vcpu->arch.tcr = new_tcr;
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001818 arm_next_watchdog(vcpu);
Scott Wooddfd4d472011-11-17 12:39:59 +00001819 update_timer_ints(vcpu);
1820}
1821
1822void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1823{
1824 set_bits(tsr_bits, &vcpu->arch.tsr);
1825 smp_wmb();
1826 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1827 kvm_vcpu_kick(vcpu);
1828}
1829
1830void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1831{
1832 clear_bits(tsr_bits, &vcpu->arch.tsr);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001833
1834 /*
1835 * We may have stopped the watchdog because it was stuck at its
1836 * final expiration; re-arm it now that ENW/WIS are being cleared.
1837 */
1838 if (tsr_bits & (TSR_ENW | TSR_WIS))
1839 arm_next_watchdog(vcpu);
1840
Scott Wooddfd4d472011-11-17 12:39:59 +00001841 update_timer_ints(vcpu);
1842}
1843
Mihai Caramand02d4d12014-09-01 17:19:56 +03001844void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
Scott Wooddfd4d472011-11-17 12:39:59 +00001845{
Bharat Bhushan21bd0002012-05-20 23:21:23 +00001846 if (vcpu->arch.tcr & TCR_ARE) {
1847 vcpu->arch.dec = vcpu->arch.decar;
1848 kvmppc_emulate_dec(vcpu);
1849 }
1850
Scott Wooddfd4d472011-11-17 12:39:59 +00001851 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1852}
1853
Bharat Bhushance11e482013-07-04 12:27:47 +05301854static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1855 uint64_t addr, int index)
1856{
1857 switch (index) {
1858 case 0:
1859 dbg_reg->dbcr0 |= DBCR0_IAC1;
1860 dbg_reg->iac1 = addr;
1861 break;
1862 case 1:
1863 dbg_reg->dbcr0 |= DBCR0_IAC2;
1864 dbg_reg->iac2 = addr;
1865 break;
1866#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1867 case 2:
1868 dbg_reg->dbcr0 |= DBCR0_IAC3;
1869 dbg_reg->iac3 = addr;
1870 break;
1871 case 3:
1872 dbg_reg->dbcr0 |= DBCR0_IAC4;
1873 dbg_reg->iac4 = addr;
1874 break;
1875#endif
1876 default:
1877 return -EINVAL;
1878 }
1879
1880 dbg_reg->dbcr0 |= DBCR0_IDM;
1881 return 0;
1882}
1883
1884static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1885 int type, int index)
1886{
1887 switch (index) {
1888 case 0:
1889 if (type & KVMPPC_DEBUG_WATCH_READ)
1890 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1891 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1892 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1893 dbg_reg->dac1 = addr;
1894 break;
1895 case 1:
1896 if (type & KVMPPC_DEBUG_WATCH_READ)
1897 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1898 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1899 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1900 dbg_reg->dac2 = addr;
1901 break;
1902 default:
1903 return -EINVAL;
1904 }
1905
1906 dbg_reg->dbcr0 |= DBCR0_IDM;
1907 return 0;
1908}
1909void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1910{
1911 /* XXX: Add similar MSR protection for BookE-PR */
1912#ifdef CONFIG_KVM_BOOKE_HV
1913 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1914 if (set) {
1915 if (prot_bitmap & MSR_UCLE)
1916 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1917 if (prot_bitmap & MSR_DE)
1918 vcpu->arch.shadow_msrp |= MSRP_DEP;
1919 if (prot_bitmap & MSR_PMM)
1920 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1921 } else {
1922 if (prot_bitmap & MSR_UCLE)
1923 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1924 if (prot_bitmap & MSR_DE)
1925 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1926 if (prot_bitmap & MSR_PMM)
1927 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1928 }
1929#endif
1930}
1931
Alexander Graf7d15c06f2014-06-20 13:52:36 +02001932int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1933 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1934{
1935 int gtlb_index;
1936 gpa_t gpaddr;
1937
1938#ifdef CONFIG_KVM_E500V2
1939 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1940 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1941 pte->eaddr = eaddr;
1942 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1943 (eaddr & ~PAGE_MASK);
1944 pte->vpage = eaddr >> PAGE_SHIFT;
1945 pte->may_read = true;
1946 pte->may_write = true;
1947 pte->may_execute = true;
1948
1949 return 0;
1950 }
1951#endif
1952
1953 /* Check the guest TLB. */
1954 switch (xlid) {
1955 case XLATE_INST:
1956 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1957 break;
1958 case XLATE_DATA:
1959 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1960 break;
1961 default:
1962 BUG();
1963 }
1964
1965 /* Do we have a TLB entry at all? */
1966 if (gtlb_index < 0)
1967 return -ENOENT;
1968
1969 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1970
1971 pte->eaddr = eaddr;
1972 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
1973 pte->vpage = eaddr >> PAGE_SHIFT;
1974
1975 /* XXX read permissions from the guest TLB */
1976 pte->may_read = true;
1977 pte->may_write = true;
1978 pte->may_execute = true;
1979
1980 return 0;
1981}
1982
Bharat Bhushance11e482013-07-04 12:27:47 +05301983int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1984 struct kvm_guest_debug *dbg)
1985{
1986 struct debug_reg *dbg_reg;
1987 int n, b = 0, w = 0;
Christoffer Dall66b56562017-12-04 21:35:33 +01001988 int ret = 0;
1989
1990 vcpu_load(vcpu);
Bharat Bhushance11e482013-07-04 12:27:47 +05301991
1992 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
Bharat Bhushan348ba712014-08-06 12:08:55 +05301993 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05301994 vcpu->guest_debug = 0;
1995 kvm_guest_protect_msr(vcpu, MSR_DE, false);
Christoffer Dall66b56562017-12-04 21:35:33 +01001996 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05301997 }
1998
1999 kvm_guest_protect_msr(vcpu, MSR_DE, true);
2000 vcpu->guest_debug = dbg->control;
Bharat Bhushan348ba712014-08-06 12:08:55 +05302001 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05302002
2003 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
Bharat Bhushan348ba712014-08-06 12:08:55 +05302004 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Bharat Bhushance11e482013-07-04 12:27:47 +05302005
2006 /* Code below handles only HW breakpoints */
Bharat Bhushan348ba712014-08-06 12:08:55 +05302007 dbg_reg = &(vcpu->arch.dbg_reg);
Bharat Bhushance11e482013-07-04 12:27:47 +05302008
2009#ifdef CONFIG_KVM_BOOKE_HV
2010 /*
2011 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
2012 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
2013 */
2014 dbg_reg->dbcr1 = 0;
2015 dbg_reg->dbcr2 = 0;
2016#else
2017 /*
2018 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
2019 * We set DBCR1 and DBCR2 to trigger debug events only when MSR.PR
2020 * is set.
2021 */
2022 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
2023 DBCR1_IAC4US;
2024 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
2025#endif
2026
2027 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
Christoffer Dall66b56562017-12-04 21:35:33 +01002028 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302029
Christoffer Dall66b56562017-12-04 21:35:33 +01002030 ret = -EINVAL;
Bharat Bhushance11e482013-07-04 12:27:47 +05302031 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
2032 uint64_t addr = dbg->arch.bp[n].addr;
2033 uint32_t type = dbg->arch.bp[n].type;
2034
2035 if (type == KVMPPC_DEBUG_NONE)
2036 continue;
2037
Dan Carpenterac0e89b2016-07-14 13:15:46 +03002038 if (type & ~(KVMPPC_DEBUG_WATCH_READ |
Bharat Bhushance11e482013-07-04 12:27:47 +05302039 KVMPPC_DEBUG_WATCH_WRITE |
2040 KVMPPC_DEBUG_BREAKPOINT))
Christoffer Dall66b56562017-12-04 21:35:33 +01002041 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302042
2043 if (type & KVMPPC_DEBUG_BREAKPOINT) {
2044 /* Setting H/W breakpoint */
2045 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
Christoffer Dall66b56562017-12-04 21:35:33 +01002046 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302047 } else {
2048 /* Setting H/W watchpoint */
2049 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
2050 type, w++))
Christoffer Dall66b56562017-12-04 21:35:33 +01002051 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302052 }
2053 }
2054
Christoffer Dall66b56562017-12-04 21:35:33 +01002055 ret = 0;
2056out:
2057 vcpu_put(vcpu);
2058 return ret;
Bharat Bhushance11e482013-07-04 12:27:47 +05302059}
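/*
 * A rough sketch of the userspace side of the handler above (hypothetical
 * helper, error handling omitted): request one hardware instruction
 * breakpoint via KVM_SET_GUEST_DEBUG. Unused bp[] slots stay zeroed, i.e.
 * KVMPPC_DEBUG_NONE, and are skipped by the loop above; asking for more
 * IAC/DAC slots than are available makes the ioctl fail with -EINVAL.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
 *	{
 *		struct kvm_guest_debug dbg;
 *
 *		memset(&dbg, 0, sizeof(dbg));
 *		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *		dbg.arch.bp[0].addr = addr;
 *		dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *	}
 */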
2060
Scott Wood94fa9d92011-12-20 15:34:22 +00002061void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2062{
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002063 vcpu->cpu = smp_processor_id();
Scott Woodd30f6e42011-12-20 15:34:43 +00002064 current->thread.kvm_vcpu = vcpu;
Scott Wood94fa9d92011-12-20 15:34:22 +00002065}
2066
2067void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2068{
Scott Woodd30f6e42011-12-20 15:34:43 +00002069 current->thread.kvm_vcpu = NULL;
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002070 vcpu->cpu = -1;
Bharat Bhushance11e482013-07-04 12:27:47 +05302071
2072 /* Clear pending debug event in DBSR */
2073 kvmppc_clear_dbsr();
Scott Wood94fa9d92011-12-20 15:34:22 +00002074}
2075
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302076int kvmppc_core_init_vm(struct kvm *kvm)
2077{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302078 return kvm->arch.kvm_ops->init_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302079}
2080
Sean Christophersonff030fd2019-12-18 13:55:00 -08002081int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302082{
Sean Christophersonb3d42c92019-12-18 13:55:21 -08002083 int i;
2084 int r;
2085
2086 r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
2087 if (r)
2088 return r;
2089
2090 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
2091 vcpu->arch.regs.nip = 0;
2092 vcpu->arch.shared->pir = vcpu->vcpu_id;
2093 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
2094 kvmppc_set_msr(vcpu, 0);
2095
2096#ifndef CONFIG_KVM_BOOKE_HV
2097 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
2098 vcpu->arch.shadow_pid = 1;
2099 vcpu->arch.shared->msr = 0;
2100#endif
2101
2102 /* Eye-catching numbers so we know if the guest takes an interrupt
2103 * before it's programmed its own IVPR/IVORs. */
2104 vcpu->arch.ivpr = 0x55550000;
2105 for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
2106 vcpu->arch.ivor[i] = 0x7700 | i * 4;
2107
2108 kvmppc_init_timing_stats(vcpu);
2109
2110 r = kvmppc_core_vcpu_setup(vcpu);
2111 if (r)
2112 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2113 kvmppc_sanity_check(vcpu);
2114 return r;
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302115}
2116
2117void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2118{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302119 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302120}
2121
2122void kvmppc_core_destroy_vm(struct kvm *kvm)
2123{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302124 kvm->arch.kvm_ops->destroy_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302125}
2126
2127void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2128{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302129 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302130}
2131
2132void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2133{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302134 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002135}
2136
2137int __init kvmppc_booke_init(void)
2138{
Scott Woodd30f6e42011-12-20 15:34:43 +00002139#ifndef CONFIG_KVM_BOOKE_HV
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002140 unsigned long ivor[16];
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002141 unsigned long *handler = kvmppc_booke_handler_addr;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002142 unsigned long max_ivor = 0;
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002143 unsigned long handler_len;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002144 int i;
2145
2146 /* We install our own exception handlers by hijacking IVPR. IVPR must
2147 * be 64KB aligned (its low 16 bits are ignored), so we need a 64KB allocation. */
2148 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
2149 VCPU_SIZE_ORDER);
2150 if (!kvmppc_booke_handlers)
2151 return -ENOMEM;
2152
2153 /* XXX make sure our handlers are smaller than Linux's */
2154
2155 /* Copy our interrupt handlers to match host IVORs. That way we don't
2156 * have to swap the IVORs on every guest/host transition. */
2157 ivor[0] = mfspr(SPRN_IVOR0);
2158 ivor[1] = mfspr(SPRN_IVOR1);
2159 ivor[2] = mfspr(SPRN_IVOR2);
2160 ivor[3] = mfspr(SPRN_IVOR3);
2161 ivor[4] = mfspr(SPRN_IVOR4);
2162 ivor[5] = mfspr(SPRN_IVOR5);
2163 ivor[6] = mfspr(SPRN_IVOR6);
2164 ivor[7] = mfspr(SPRN_IVOR7);
2165 ivor[8] = mfspr(SPRN_IVOR8);
2166 ivor[9] = mfspr(SPRN_IVOR9);
2167 ivor[10] = mfspr(SPRN_IVOR10);
2168 ivor[11] = mfspr(SPRN_IVOR11);
2169 ivor[12] = mfspr(SPRN_IVOR12);
2170 ivor[13] = mfspr(SPRN_IVOR13);
2171 ivor[14] = mfspr(SPRN_IVOR14);
2172 ivor[15] = mfspr(SPRN_IVOR15);
2173
2174 for (i = 0; i < 16; i++) {
2175 if (ivor[i] > ivor[max_ivor])
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002176 max_ivor = i;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002177
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002178 handler_len = handler[i + 1] - handler[i];
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002179 memcpy((void *)kvmppc_booke_handlers + ivor[i],
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002180 (void *)handler[i], handler_len);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002181 }
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002182
2183 handler_len = handler[max_ivor + 1] - handler[max_ivor];
2184 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
2185 ivor[max_ivor] + handler_len);
Scott Woodd30f6e42011-12-20 15:34:43 +00002186#endif /* !BOOKE_HV */
Hollis Blancharddb93f572008-11-05 09:36:18 -06002187 return 0;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002188}
2189
Hollis Blancharddb93f572008-11-05 09:36:18 -06002190void __exit kvmppc_booke_exit(void)
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002191{
2192 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
2193 kvm_exit();
2194}