// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

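/*
 * Exit statistics exported via debugfs: VM_STAT entries are per-VM
 * counters, VCPU_STAT entries are per-vCPU counters.
 */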
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell", VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.regs.nip,
               vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.regs.link,
               vcpu->arch.regs.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        disable_kernel_spe();
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        disable_kernel_spe();
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

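/*
 * Keep the shadow MSR's SPE bit in sync with the guest's MSR[SPE],
 * lazily loading guest SPE state when the guest enables it and
 * saving it back when the guest turns it off.
 */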
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows we're
 * holding the FPU, letting the host save the guest vcpu FP state
 * if another thread needs to use the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
        if (!(current->thread.regs->msr & MSR_FP)) {
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                current->thread.fp_save_area = &vcpu->arch.fp;
                current->thread.regs->msr |= MSR_FP;
        }
#endif
}

/*
 * Save guest vcpu FP state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
        /*
         * We always treat the FP bit as enabled from the host
         * perspective, so we only need to adjust the shadow MSR.
         */
        vcpu->arch.shadow_msr &= ~MSR_FP;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                if (!(current->thread.regs->msr & MSR_VEC)) {
                        enable_kernel_altivec();
                        load_vr_state(&vcpu->arch.vr);
                        disable_kernel_altivec();
                        current->thread.vr_save_area = &vcpu->arch.vr;
                        current->thread.regs->msr |= MSR_VEC;
                }
        }
#endif
}

/*
 * Save guest vcpu AltiVec state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                current->thread.vr_save_area = NULL;
        }
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
        /* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr &= ~MSR_DE;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

        /* Force enable debug interrupts when user space wants to debug */
        if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
                /*
                 * Since there is no shadow MSR, sync MSR_DE into the guest
                 * visible MSR.
                 */
                vcpu->arch.shared->msr |= MSR_DE;
#else
                vcpu->arch.shadow_msr |= MSR_DE;
                vcpu->arch.shared->msr &= ~MSR_DE;
#endif
        }
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
        kvmppc_vcpu_sync_fpu(vcpu);
        kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        trace_kvm_booke_queue_irqprio(vcpu, priority);
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                 ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                    ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

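/*
 * Helpers to stash the interrupted PC/MSR into the save/restore
 * register pair matching each interrupt class: SRR for noncritical,
 * CSRR for critical, DSRR for debug (falling back to CSRR when the
 * core has no separate debug level), and MCSRR for machine check.
 */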
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        kvmppc_set_srr0(vcpu, srr0);
        kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false, update_epr = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;
        ulong new_msr = vcpu->arch.shared->msr;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
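
        /*
         * Level-triggered external interrupts stay asserted until
         * userspace explicitly dequeues them, so deliver them as normal
         * external interrupts but keep the pending bit set.
         */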
        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
                update_epr = true;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
        case BOOKE_IRQPRIO_ALIGNMENT:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
        case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
        case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
        case BOOKE_IRQPRIO_AP_UNAVAIL:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_WATCHDOG:
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
                        int_class = INT_CLASS_DBG;
                else
                        int_class = INT_CLASS_CRIT;

                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.regs.nip,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.regs.nip = vcpu->arch.ivpr |
                                        vcpu->arch.ivor[priority];
                if (update_esr == true)
                        kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
                        kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
                if (update_epr == true) {
                        if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
                                kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
                        else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
                                BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
                                kvmppc_mpic_set_epr(vcpu);
                        }
                }

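                /*
                 * Interrupt delivery clears all MSR bits except those in
                 * msr_mask; on 64-bit cores, EPCR[ICM] selects whether the
                 * interrupt handler runs in 64-bit computation mode.
                 */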
                new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
                if (vcpu->arch.epcr & SPRN_EPCR_ICM)
                        new_msr |= MSR_CM;
#endif
                kvmppc_set_msr(vcpu, new_msr);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA,
 * because a larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
        u64 tb, wdt_tb, wdt_ticks = 0;
        u64 nr_jiffies = 0;
        u32 period = TCR_GET_WP(vcpu->arch.tcr);

        wdt_tb = 1ULL << (63 - period);
        tb = get_tb();
        /*
         * The watchdog timeout will happen when the TB bit corresponding
         * to the watchdog period toggles from 0 to 1.
         */
        if (tb & wdt_tb)
                wdt_ticks = wdt_tb;

        wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
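        /*
         * wdt_ticks now holds the timebase ticks remaining until the
         * watchdog bit next toggles from 0 to 1 (including one full
         * extra period if the bit is currently 1).
         */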

        /* Convert timebase ticks to jiffies */
        nr_jiffies = wdt_ticks;

        if (do_div(nr_jiffies, tb_ticks_per_jiffy))
                nr_jiffies++;

        return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

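/*
 * Worked example (illustrative numbers only, not taken from this file):
 * with a 512 MHz timebase and a watchdog period such that wdt_tb = 2^32,
 * the selected TB bit toggles roughly every 8.4 seconds, so at HZ=250
 * the function above returns on the order of 2100 jiffies.
 */
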
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
        unsigned long nr_jiffies;
        unsigned long flags;

        /*
         * If TSR_ENW and TSR_WIS are not set then there is no need to
         * exit to userspace, so clear the KVM_REQ_WATCHDOG request.
         */
        if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
                kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

        spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
        nr_jiffies = watchdog_next_timeout(vcpu);
        /*
         * If the number of jiffies until the watchdog timeout is
         * >= NEXT_TIMER_MAX_DELTA then do not run the watchdog timer,
         * as this can break the timer APIs.
         */
        if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
                mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
        else
                del_timer(&vcpu->arch.wdt_timer);
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
        struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
        u32 tsr, new_tsr;
        int final;

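        /*
         * Advance TSR atomically with cmpxchg: the first expiry sets
         * TSR[ENW], the next sets TSR[WIS], and an expiry with both bits
         * already set counts as the final one.
         */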
        do {
                new_tsr = tsr = vcpu->arch.tsr;
                final = 0;

                /* Time out event */
                if (tsr & TSR_ENW) {
                        if (tsr & TSR_WIS)
                                final = 1;
                        else
                                new_tsr = tsr | TSR_WIS;
                } else {
                        new_tsr = tsr | TSR_ENW;
                }
        } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

        if (new_tsr & TSR_WIS) {
                smp_wmb();
                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * If this is the final watchdog expiry and some action is required
         * then exit to userspace.
         */
        if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
            vcpu->arch.watchdog_enabled) {
                smp_wmb();
                kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * Stop running the watchdog timer after the final expiration to
         * prevent the host from being flooded with timers if the
         * guest sets a short period.
         * Timers will resume when TSR/TCR is updated next time.
         */
        if (!final)
                arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);

        if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
                kvmppc_core_queue_watchdog(vcpu);
        else
                kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

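        /*
         * Scan pending exceptions in priority order (lowest bit number
         * first) and stop as soon as one is actually delivered.
         */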
        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (kvm_request_pending(vcpu)) {
                /* Exception delivery raised request; start over */
                return 1;
        }

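        /*
         * The guest is idle (MSR[WE] is set): leave guest context and
         * block until something makes this vcpu runnable again.
         */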
        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                hard_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
                update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_core_flush_tlb(vcpu);
#endif

        if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
                r = 0;
        }

        if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
                vcpu->run->epr.epr = 0;
                vcpu->arch.epr_needed = true;
                vcpu->run->exit_reason = KVM_EXIT_EPR;
                r = 0;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret, s;
        struct debug_reg debug;

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        s = kvmppc_prepare_to_enter(vcpu);
        if (s <= 0) {
                ret = s;
                goto out;
        }
        /* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.
         */
        kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
        /* Save userspace AltiVec state in stack */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                enable_kernel_altivec();
        /*
         * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
         * as always using AltiVec.
         */
        kvmppc_load_guest_altivec(vcpu);
#endif

        /* Switch to guest debug context */
        debug = vcpu->arch.dbg_reg;
        switch_booke_debug_regs(&debug);
        debug = current->thread.debug;
        current->thread.debug = vcpu->arch.dbg_reg;

        vcpu->arch.pgdir = current->mm->pgd;
        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /*
         * No need for guest_exit; it's done in handle_exit.
         * We also get here with interrupts enabled.
         */

        /* Switch back to user space debug context */
        switch_booke_debug_regs(&debug);
        current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
        kvmppc_save_guest_altivec(vcpu);
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /*
                 * Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation.
                 */
                return RESUME_GUEST_NV;

        case EMULATE_AGAIN:
                return RESUME_GUEST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
                /*
                 * For debugging, encode the failing instruction and
                 * report it to userspace.
                 */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        case EMULATE_EXIT_USER:
                return RESUME_HOST;

        default:
                BUG();
        }
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
        u32 dbsr = vcpu->arch.dbsr;

        if (vcpu->guest_debug == 0) {
                /*
                 * Debug resources belong to the guest.
                 * Imprecise debug events are not injected.
                 */
                if (dbsr & DBSR_IDE) {
                        dbsr &= ~DBSR_IDE;
                        if (!dbsr)
                                return RESUME_GUEST;
                }

                if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
                    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
                        kvmppc_core_queue_debug(vcpu);

                /* Inject a program interrupt if trap debug is not allowed */
                if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
                        kvmppc_core_queue_program(vcpu, ESR_PTR);

                return RESUME_GUEST;
        }

        /*
         * Debug resources are owned by userspace.
         * Clear the guest dbsr (vcpu->arch.dbsr).
         */
        vcpu->arch.dbsr = 0;
        run->debug.arch.status = 0;
        run->debug.arch.address = vcpu->arch.regs.nip;

        if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
                run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
        } else {
                if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
                else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
                if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
                        run->debug.arch.address = dbg_reg->dac1;
                else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
                        run->debug.arch.address = dbg_reg->dac2;
        }

        return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

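        /*
         * Snapshot the current stack pointer (r1), link register, MSR,
         * and PC (grabbed via a local bl/mflr pair) so the host
         * exception handlers see a plausible pt_regs.
         */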
        asm("mr %0, 1" : "=r"(r1));
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}

/*
 * Interrupts that need to be handled by host interrupt handlers are
 * forwarded to the corresponding host handler from here, in much the
 * same way (though not exactly) as they would be called from the low
 * level handlers (such as arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_DOORBELL)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        case BOOKE_INTERRUPT_WATCHDOG:
                kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
                WatchdogException(&regs);
#else
                unknown_exception(&regs);
#endif
                break;
        case BOOKE_INTERRUPT_CRITICAL:
                kvmppc_fill_pt_regs(&regs);
                unknown_exception(&regs);
                break;
        case BOOKE_INTERRUPT_DEBUG:
                /* Save DBSR before preemption is enabled */
                vcpu->arch.dbsr = mfspr(SPRN_DBSR);
                kvmppc_clear_dbsr();
                break;
        }
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   enum emulation_result emulated, u32 last_inst)
{
        switch (emulated) {
        case EMULATE_AGAIN:
                return RESUME_GUEST;

        case EMULATE_FAIL:
                pr_debug("%s: load instruction from guest address %lx failed\n",
                         __func__, vcpu->arch.regs.nip);
                /*
                 * For debugging, encode the failing instruction and
                 * report it to userspace.
                 */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;
        int idx;
        u32 last_inst = KVM_INST_FETCH_FAILED;
        enum emulation_result emulated = EMULATE_DONE;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        /*
         * Get the last instruction before being preempted.
         * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
         */
        switch (exit_nr) {
        case BOOKE_INTERRUPT_DATA_STORAGE:
        case BOOKE_INTERRUPT_DTLB_MISS:
        case BOOKE_INTERRUPT_HV_PRIV:
                emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                break;
        case BOOKE_INTERRUPT_PROGRAM:
                /* SW breakpoints arrive as illegal instructions on HV */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                break;
        default:
                break;
        }

        trace_kvm_exit(exit_nr, vcpu);
        guest_exit_irqoff();

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        if (emulated != EMULATE_DONE) {
                r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
                goto out;
        }

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_WATCHDOG:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set.  Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set.  Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
                    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
                        /*
                         * We are here because of an SW breakpoint instr,
                         * so let's return to host to handle.
                         */
                        r = kvmppc_handle_debug(run, vcpu);
                        run->exit_reason = KVM_EXIT_DEBUG;
                        kvmppc_account_exit(vcpu, DEBUG_EXITS);
                        break;
                }

                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#elif defined(CONFIG_SPE_POSSIBLE)
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it.  Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.regs.nip);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
        case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_ALIGNMENT:
                kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
                                            vcpu->arch.fault_esr);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /*
                         * The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM.  This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest.
                         */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /*
                         * Guest has mapped and accessed a page which is not
                         * actually RAM.
                         */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }

1293 case BOOKE_INTERRUPT_ITLB_MISS: {
Simon Guo173c5202018-05-07 14:20:08 +08001294 unsigned long eaddr = vcpu->arch.regs.nip;
Hollis Blanchard89168612008-12-02 15:51:53 -06001295 gpa_t gpaddr;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001296 gfn_t gfn;
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001297 int gtlb_index;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001298
1299 r = RESUME_GUEST;
1300
1301 /* Check the guest TLB. */
Hollis Blanchardfa86b8d2009-01-03 16:23:03 -06001302 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001303 if (gtlb_index < 0) {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001304 /* The guest didn't have a mapping for it. */
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
Hollis Blanchardb52a6382009-01-03 16:23:11 -06001306 kvmppc_mmu_itlb_miss(vcpu);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001307 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001308 break;
1309 }
1310
Hollis Blanchard7b701592008-12-02 15:51:58 -06001311 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001312
Scott Woodf1e89022013-06-06 19:16:31 -05001313 idx = srcu_read_lock(&vcpu->kvm->srcu);
1314
Hollis Blanchardbe8d1ca2009-01-03 16:23:02 -06001315 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
Hollis Blanchard89168612008-12-02 15:51:53 -06001316 gfn = gpaddr >> PAGE_SHIFT;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001317
1318 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1319 /* The guest TLB had a mapping, but the shadow TLB
1320 * didn't. This could be because:
1321 * a) the entry is mapping the host kernel, or
1322 * b) the guest used a large mapping which we're faking
1323 * Either way, we need to satisfy the fault without
1324 * invoking the guest. */
Hollis Blanchard58a96212009-01-03 16:23:01 -06001325 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001326 } else {
 1327 /* Guest has mapped and jumped to a non-RAM address! */
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001328 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001329 }
1330
Scott Woodf1e89022013-06-06 19:16:31 -05001331 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001332 break;
1333 }
1334
Hollis Blanchard6a0ab732008-07-25 13:54:49 -05001335 case BOOKE_INTERRUPT_DEBUG: {
Bharat Bhushance11e482013-07-04 12:27:47 +05301336 r = kvmppc_handle_debug(run, vcpu);
1337 if (r == RESUME_HOST)
1338 run->exit_reason = KVM_EXIT_DEBUG;
Hollis Blanchard7b701592008-12-02 15:51:58 -06001339 kvmppc_account_exit(vcpu, DEBUG_EXITS);
Hollis Blanchard6a0ab732008-07-25 13:54:49 -05001340 break;
1341 }
1342
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001343 default:
1344 printk(KERN_EMERG "exit_nr %d\n", exit_nr);
1345 BUG();
1346 }
1347
Mihai Caramanf5250472014-07-23 19:06:22 +03001348out:
Alexander Grafa8e4ef82012-02-16 14:07:37 +00001349 /*
1350 * To avoid clobbering exit_reason, only check for signals if we
1351 * aren't already exiting to userspace for some other reason.
1352 */
Alexander Graf03660ba2012-02-28 12:00:41 +01001353 if (!(r & RESUME_HOST)) {
Alexander Graf7ee78852012-08-13 12:44:41 +02001354 s = kvmppc_prepare_to_enter(vcpu);
Scott Wood6c85f522014-01-09 19:18:40 -06001355 if (s <= 0) {
Alexander Graf7ee78852012-08-13 12:44:41 +02001356 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
Scott Wood6c85f522014-01-09 19:18:40 -06001357 } else {
1358 /* interrupts now hard-disabled */
Scott Wood5f1c2482013-07-10 17:47:39 -05001359 kvmppc_fix_ee_before_entry();
Mihai Caraman3efc7da2014-08-20 16:36:22 +03001360 kvmppc_load_guest_fp(vcpu);
Mihai Caraman95d80a22014-08-20 16:36:23 +03001361 kvmppc_load_guest_altivec(vcpu);
Alexander Graf03660ba2012-02-28 12:00:41 +01001362 }
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001363 }
1364
1365 return r;
1366}
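
/*
 * Sketch (assumption: RESUME_FLAG_NV and RESUME_FLAG_HOST from kvm_ppc.h
 * occupy bits 0 and 1) of how a caller can decode the packed return value
 * built above, where the non-positive exit reason from
 * kvmppc_prepare_to_enter() is shifted above the flag bits.
 */
static inline bool resume_to_host(int r)
{
	return (r & RESUME_FLAG_HOST) != 0;
}

static inline int resume_exit_code(int r)
{
	return r >> 2;			/* recovers s from (s << 2) | flags */
}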
1367
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001368static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1369{
1370 u32 old_tsr = vcpu->arch.tsr;
1371
1372 vcpu->arch.tsr = new_tsr;
1373
1374 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1375 arm_next_watchdog(vcpu);
1376
1377 update_timer_ints(vcpu);
1378}
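
/*
 * Only TSR[ENW] and TSR[WIS] feed the watchdog state machine, which is why
 * the XOR above gates arm_next_watchdog(). A minimal sketch (illustrative,
 * not kernel code) of the Book E progression those two bits encode; the
 * real handling lives in arm_next_watchdog() and kvmppc_watchdog_func().
 */
static u32 watchdog_next_tsr(u32 tsr, bool *raise_irq, bool *final_action)
{
	*raise_irq = false;
	*final_action = false;

	if (!(tsr & TSR_ENW)) {
		tsr |= TSR_ENW;			/* first expiry: arm */
	} else if (!(tsr & TSR_WIS)) {
		tsr |= TSR_WIS;			/* second expiry: status */
		*raise_irq = true;		/* delivered if TCR[WIE] */
	} else {
		*final_action = true;		/* third expiry: TCR[WRC] */
	}

	return tsr;
}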
1379
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001380/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB - 8 */
1381int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1382{
Hollis Blanchard082decf2010-08-07 10:33:56 -07001383 int i;
Alexander Grafaf8f38b2011-08-10 13:57:08 +02001384 int r;
Hollis Blanchard082decf2010-08-07 10:33:56 -07001385
Simon Guo173c5202018-05-07 14:20:08 +08001386 vcpu->arch.regs.nip = 0;
Scott Woodb5904972011-11-08 18:23:30 -06001387 vcpu->arch.shared->pir = vcpu->vcpu_id;
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001388 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
Scott Woodd30f6e42011-12-20 15:34:43 +00001389 kvmppc_set_msr(vcpu, 0);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001390
Scott Woodd30f6e42011-12-20 15:34:43 +00001391#ifndef CONFIG_KVM_BOOKE_HV
Bharat Bhushance11e482013-07-04 12:27:47 +05301392 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
Hollis Blanchard49dd2c42008-07-25 13:54:53 -05001393 vcpu->arch.shadow_pid = 1;
Scott Woodd30f6e42011-12-20 15:34:43 +00001394 vcpu->arch.shared->msr = 0;
1395#endif
Hollis Blanchard49dd2c42008-07-25 13:54:53 -05001396
Hollis Blanchard082decf2010-08-07 10:33:56 -07001397 /* Eye-catching numbers so we know if the guest takes an interrupt
1398 * before it's programmed its own IVPR/IVORs. */
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001399 vcpu->arch.ivpr = 0x55550000;
Hollis Blanchard082decf2010-08-07 10:33:56 -07001400 for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
1401 vcpu->arch.ivor[i] = 0x7700 | i * 4;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001402
Hollis Blanchard73e75b42008-12-02 15:51:57 -06001403 kvmppc_init_timing_stats(vcpu);
1404
Alexander Grafaf8f38b2011-08-10 13:57:08 +02001405 r = kvmppc_core_vcpu_setup(vcpu);
1406 kvmppc_sanity_check(vcpu);
1407 return r;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001408}
1409
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001410int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1411{
1412 /* setup watchdog timer once */
1413 spin_lock_init(&vcpu->arch.wdt_lock);
Kees Cook86cb30e2017-10-17 20:21:24 -07001414 timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001415
Bharat Bhushan2f699a52014-08-13 14:39:44 +05301416 /*
 1417 * Clear DBSR.MRR to avoid a spurious guest debug interrupt;
 1418 * the most-recent-reset status is of host interest only.
1419 */
1420 mtspr(SPRN_DBSR, DBSR_MRR);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001421 return 0;
1422}
1423
1424void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1425{
1426 del_timer_sync(&vcpu->arch.wdt_timer);
1427}
1428
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001429int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1430{
1431 int i;
1432
Christoffer Dall1fc9b762017-12-04 21:35:26 +01001433 vcpu_load(vcpu);
1434
Simon Guo173c5202018-05-07 14:20:08 +08001435 regs->pc = vcpu->arch.regs.nip;
Alexander Graf992b5b22010-01-08 02:58:02 +01001436 regs->cr = kvmppc_get_cr(vcpu);
Simon Guo173c5202018-05-07 14:20:08 +08001437 regs->ctr = vcpu->arch.regs.ctr;
1438 regs->lr = vcpu->arch.regs.link;
Alexander Graf992b5b22010-01-08 02:58:02 +01001439 regs->xer = kvmppc_get_xer(vcpu);
Alexander Graf666e7252010-07-29 14:47:43 +02001440 regs->msr = vcpu->arch.shared->msr;
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301441 regs->srr0 = kvmppc_get_srr0(vcpu);
1442 regs->srr1 = kvmppc_get_srr1(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001443 regs->pid = vcpu->arch.pid;
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301444 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1445 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1446 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1447 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1448 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1449 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1450 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1451 regs->sprg7 = kvmppc_get_sprg7(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001452
1453 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001454 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001455
Christoffer Dall1fc9b762017-12-04 21:35:26 +01001456 vcpu_put(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001457 return 0;
1458}
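
/*
 * Userspace sketch of the other end of this ioctl; not part of this file.
 * Assumes <stdio.h>, <sys/ioctl.h> and <linux/kvm.h>, plus a vcpu fd
 * obtained via KVM_CREATE_VCPU. Error handling elided.
 */
static void dump_guest_regs(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
		printf("pc=%llx msr=%llx lr=%llx r1=%llx\n",
		       (unsigned long long)regs.pc,
		       (unsigned long long)regs.msr,
		       (unsigned long long)regs.lr,
		       (unsigned long long)regs.gpr[1]);
}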
1459
1460int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1461{
1462 int i;
1463
Christoffer Dall875656f2017-12-04 21:35:27 +01001464 vcpu_load(vcpu);
1465
Simon Guo173c5202018-05-07 14:20:08 +08001466 vcpu->arch.regs.nip = regs->pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001467 kvmppc_set_cr(vcpu, regs->cr);
Simon Guo173c5202018-05-07 14:20:08 +08001468 vcpu->arch.regs.ctr = regs->ctr;
1469 vcpu->arch.regs.link = regs->lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001470 kvmppc_set_xer(vcpu, regs->xer);
Hollis Blanchardb8fd68a2008-11-05 09:36:20 -06001471 kvmppc_set_msr(vcpu, regs->msr);
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301472 kvmppc_set_srr0(vcpu, regs->srr0);
1473 kvmppc_set_srr1(vcpu, regs->srr1);
Scott Wood5ce941e2011-04-27 17:24:21 -05001474 kvmppc_set_pid(vcpu, regs->pid);
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301475 kvmppc_set_sprg0(vcpu, regs->sprg0);
1476 kvmppc_set_sprg1(vcpu, regs->sprg1);
1477 kvmppc_set_sprg2(vcpu, regs->sprg2);
1478 kvmppc_set_sprg3(vcpu, regs->sprg3);
1479 kvmppc_set_sprg4(vcpu, regs->sprg4);
1480 kvmppc_set_sprg5(vcpu, regs->sprg5);
1481 kvmppc_set_sprg6(vcpu, regs->sprg6);
1482 kvmppc_set_sprg7(vcpu, regs->sprg7);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001483
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001484 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1485 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001486
Christoffer Dall875656f2017-12-04 21:35:27 +01001487 vcpu_put(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001488 return 0;
1489}
1490
Scott Wood5ce941e2011-04-27 17:24:21 -05001491static void get_sregs_base(struct kvm_vcpu *vcpu,
1492 struct kvm_sregs *sregs)
1493{
1494 u64 tb = get_tb();
1495
1496 sregs->u.e.features |= KVM_SREGS_E_BASE;
1497
1498 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1499 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1500 sregs->u.e.mcsr = vcpu->arch.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301501 sregs->u.e.esr = kvmppc_get_esr(vcpu);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301502 sregs->u.e.dear = kvmppc_get_dar(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001503 sregs->u.e.tsr = vcpu->arch.tsr;
1504 sregs->u.e.tcr = vcpu->arch.tcr;
1505 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1506 sregs->u.e.tb = tb;
1507 sregs->u.e.vrsave = vcpu->arch.vrsave;
1508}
1509
1510static int set_sregs_base(struct kvm_vcpu *vcpu,
1511 struct kvm_sregs *sregs)
1512{
1513 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1514 return 0;
1515
1516 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1517 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1518 vcpu->arch.mcsr = sregs->u.e.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301519 kvmppc_set_esr(vcpu, sregs->u.e.esr);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301520 kvmppc_set_dar(vcpu, sregs->u.e.dear);
Scott Wood5ce941e2011-04-27 17:24:21 -05001521 vcpu->arch.vrsave = sregs->u.e.vrsave;
Scott Wooddfd4d472011-11-17 12:39:59 +00001522 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001523
Scott Wooddfd4d472011-11-17 12:39:59 +00001524 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
Scott Wood5ce941e2011-04-27 17:24:21 -05001525 vcpu->arch.dec = sregs->u.e.dec;
Scott Wooddfd4d472011-11-17 12:39:59 +00001526 kvmppc_emulate_dec(vcpu);
1527 }
Scott Wood5ce941e2011-04-27 17:24:21 -05001528
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001529 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1530 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001531
1532 return 0;
1533}
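
/*
 * Userspace sketch: the update_special mask handled above lets a
 * save/restore round trip decide whether the volatile DEC and TSR values
 * are reapplied; without these bits, set_sregs_base() leaves the running
 * decrementer and timer status untouched. Assumes <sys/ioctl.h> and
 * <linux/kvm.h>; not part of this file.
 */
static int restore_vcpu_timers(int vcpu_fd, struct kvm_sregs *saved)
{
	saved->u.e.update_special = KVM_SREGS_E_UPDATE_DEC |
				    KVM_SREGS_E_UPDATE_TSR;
	return ioctl(vcpu_fd, KVM_SET_SREGS, saved);
}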
1534
1535static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1536 struct kvm_sregs *sregs)
1537{
1538 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1539
Scott Wood841741f2011-09-02 17:39:37 -05001540 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001541 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1542 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1543 sregs->u.e.decar = vcpu->arch.decar;
1544 sregs->u.e.ivpr = vcpu->arch.ivpr;
1545}
1546
1547static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1548 struct kvm_sregs *sregs)
1549{
1550 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1551 return 0;
1552
Scott Wood841741f2011-09-02 17:39:37 -05001553 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001554 return -EINVAL;
1555
1556 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1557 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1558 vcpu->arch.decar = sregs->u.e.decar;
1559 vcpu->arch.ivpr = sregs->u.e.ivpr;
1560
1561 return 0;
1562}
1563
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301564int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
Scott Wood5ce941e2011-04-27 17:24:21 -05001565{
1566 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1567
1568 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1569 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1570 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1571 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1572 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1573 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1574 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1575 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1576 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1577 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1578 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1579 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1580 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1581 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1582 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1583 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301584 return 0;
Scott Wood5ce941e2011-04-27 17:24:21 -05001585}
1586
1587int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1588{
1589 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1590 return 0;
1591
1592 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1593 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1594 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1595 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1596 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1597 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1598 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1599 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1600 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1601 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1602 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1603 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1604 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1605 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1606 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1607 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1608
1609 return 0;
1610}
1611
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001612int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1613 struct kvm_sregs *sregs)
1614{
Christoffer Dallbcdec412017-12-04 21:35:28 +01001615 int ret;
1616
1617 vcpu_load(vcpu);
1618
Scott Wood5ce941e2011-04-27 17:24:21 -05001619 sregs->pvr = vcpu->arch.pvr;
1620
1621 get_sregs_base(vcpu, sregs);
1622 get_sregs_arch206(vcpu, sregs);
Christoffer Dallbcdec412017-12-04 21:35:28 +01001623 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1624
1625 vcpu_put(vcpu);
1626 return ret;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001627}
1628
1629int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1630 struct kvm_sregs *sregs)
1631{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001632 int ret = -EINVAL;
Scott Wood5ce941e2011-04-27 17:24:21 -05001633
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001634 vcpu_load(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001635 if (vcpu->arch.pvr != sregs->pvr)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001636 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001637
1638 ret = set_sregs_base(vcpu, sregs);
1639 if (ret < 0)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001640 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001641
1642 ret = set_sregs_arch206(vcpu, sregs);
1643 if (ret < 0)
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001644 goto out;
Scott Wood5ce941e2011-04-27 17:24:21 -05001645
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01001646 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1647
1648out:
1649 vcpu_put(vcpu);
1650 return ret;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001651}
1652
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001653int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1654 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001655{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001656 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001657
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001658 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001659 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001660 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001661 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301662 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001663 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301664 break;
1665#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1666 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001667 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301668 break;
1669 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001670 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301671 break;
1672#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001673 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001674 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301675 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001676 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001677 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301678 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001679 case KVM_REG_PPC_EPR: {
Bharat Bhushan34f754b2014-07-17 17:01:40 +05301680 u32 epr = kvmppc_get_epr(vcpu);
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001681 *val = get_reg_val(id, epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001682 break;
1683 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001684#if defined(CONFIG_64BIT)
1685 case KVM_REG_PPC_EPCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001686 *val = get_reg_val(id, vcpu->arch.epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001687 break;
1688#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001689 case KVM_REG_PPC_TCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001690 *val = get_reg_val(id, vcpu->arch.tcr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001691 break;
1692 case KVM_REG_PPC_TSR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001693 *val = get_reg_val(id, vcpu->arch.tsr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001694 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001695 case KVM_REG_PPC_DEBUG_INST:
Madhavan Srinivasan033aaa12014-09-09 22:37:36 +05301696 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001697 break;
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001698 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001699 *val = get_reg_val(id, vcpu->arch.vrsave);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001700 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001701 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001702 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001703 break;
1704 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001705
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001706 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001707}
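
/*
 * Userspace sketch of reading one register through the generic ONE_REG
 * interface these handlers back; KVM_REG_PPC_DEBUG_INST is (to my reading
 * of the uapi) a 32-bit read-only register advertising the
 * software-breakpoint instruction. Assumes <stdint.h>, <sys/ioctl.h> and
 * <linux/kvm.h>; not part of this file.
 */
static int get_sw_breakpoint_inst(int vcpu_fd, uint32_t *inst)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DEBUG_INST,
		.addr = (uintptr_t)inst,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}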
1708
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001709int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1710 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001711{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001712 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001713
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001714 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001715 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001716 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001717 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301718 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001719 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301720 break;
1721#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1722 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001723 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301724 break;
1725 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001726 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301727 break;
1728#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001729 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001730 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301731 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001732 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001733 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301734 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001735 case KVM_REG_PPC_EPR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001736 u32 new_epr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001737 kvmppc_set_epr(vcpu, new_epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001738 break;
1739 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001740#if defined(CONFIG_64BIT)
1741 case KVM_REG_PPC_EPCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001742 u32 new_epcr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001743 kvmppc_set_epcr(vcpu, new_epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001744 break;
1745 }
1746#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001747 case KVM_REG_PPC_OR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001748 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001749 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1750 break;
1751 }
1752 case KVM_REG_PPC_CLEAR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001753 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001754 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1755 break;
1756 }
1757 case KVM_REG_PPC_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001758 u32 tsr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001759 kvmppc_set_tsr(vcpu, tsr);
1760 break;
1761 }
1762 case KVM_REG_PPC_TCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001763 u32 tcr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001764 kvmppc_set_tcr(vcpu, tcr);
1765 break;
1766 }
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001767 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001768 vcpu->arch.vrsave = set_reg_val(id, *val);
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001769 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001770 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001771 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001772 break;
1773 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001774
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001775 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001776}
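
/*
 * Userspace sketch: KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR are
 * write-only conveniences over kvmppc_set_tsr_bits()/kvmppc_clr_tsr_bits()
 * above, avoiding a read-modify-write race on TSR. The 0x40000000 value
 * below is assumed to be the Book E TSR[WIS] (watchdog interrupt status)
 * bit. Assumes <stdint.h>, <sys/ioctl.h> and <linux/kvm.h>; not part of
 * this file.
 */
static int ack_guest_watchdog(int vcpu_fd)
{
	uint32_t bits = 0x40000000;	/* TSR[WIS] */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_CLEAR_TSR,
		.addr = (uintptr_t)&bits,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}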
1777
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001778int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1779{
1780 return -ENOTSUPP;
1781}
1782
1783int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1784{
1785 return -ENOTSUPP;
1786}
1787
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001788int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1789 struct kvm_translation *tr)
1790{
Avi Kivity98001d82010-05-13 11:05:49 +03001791 int r;
1792
Christoffer Dall1da5b612017-12-04 21:35:32 +01001793 vcpu_load(vcpu);
Avi Kivity98001d82010-05-13 11:05:49 +03001794 r = kvmppc_core_vcpu_translate(vcpu, tr);
Christoffer Dall1da5b612017-12-04 21:35:32 +01001795 vcpu_put(vcpu);
Avi Kivity98001d82010-05-13 11:05:49 +03001796 return r;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001797}
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001798
Alexander Graf4e755752009-10-30 05:47:01 +00001799int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1800{
1801 return -ENOTSUPP;
1802}
1803
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301804void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001805 struct kvm_memory_slot *dont)
1806{
1807}
1808
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301809int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001810 unsigned long npages)
1811{
1812 return 0;
1813}
1814
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001815int kvmppc_core_prepare_memory_region(struct kvm *kvm,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001816 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02001817 const struct kvm_userspace_memory_region *mem)
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001818{
1819 return 0;
1820}
1821
1822void kvmppc_core_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02001823 const struct kvm_userspace_memory_region *mem,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02001824 const struct kvm_memory_slot *old,
Bharata B Raof032b732018-12-12 15:15:30 +11001825 const struct kvm_memory_slot *new,
1826 enum kvm_mr_change change)
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001827{
1828}
1829
1830void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001831{
1832}
1833
Mihai Caraman38f98822012-10-11 06:13:27 +00001834void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1835{
1836#if defined(CONFIG_64BIT)
1837 vcpu->arch.epcr = new_epcr;
1838#ifdef CONFIG_KVM_BOOKE_HV
1839 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1840 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1841 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1842#endif
1843#endif
1844}
1845
Scott Wooddfd4d472011-11-17 12:39:59 +00001846void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1847{
1848 vcpu->arch.tcr = new_tcr;
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001849 arm_next_watchdog(vcpu);
Scott Wooddfd4d472011-11-17 12:39:59 +00001850 update_timer_ints(vcpu);
1851}
1852
1853void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1854{
1855 set_bits(tsr_bits, &vcpu->arch.tsr);
1856 smp_wmb();
1857 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1858 kvm_vcpu_kick(vcpu);
1859}
1860
1861void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1862{
1863 clear_bits(tsr_bits, &vcpu->arch.tsr);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001864
1865 /*
1866 * We may have stopped the watchdog due to
1867 * being stuck on final expiration.
1868 */
1869 if (tsr_bits & (TSR_ENW | TSR_WIS))
1870 arm_next_watchdog(vcpu);
1871
Scott Wooddfd4d472011-11-17 12:39:59 +00001872 update_timer_ints(vcpu);
1873}
1874
Mihai Caramand02d4d12014-09-01 17:19:56 +03001875void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
Scott Wooddfd4d472011-11-17 12:39:59 +00001876{
Bharat Bhushan21bd0002012-05-20 23:21:23 +00001877 if (vcpu->arch.tcr & TCR_ARE) {
1878 vcpu->arch.dec = vcpu->arch.decar;
1879 kvmppc_emulate_dec(vcpu);
1880 }
1881
Scott Wooddfd4d472011-11-17 12:39:59 +00001882 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1883}
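
/*
 * Guest-side sketch (illustrative, not part of this file): with TCR[ARE]
 * set, the decrementer reloads from DECAR on every expiry, so the path
 * above gives the guest a periodic tick without re-arming. SPR and bit
 * names are assumed from the kernel's reg_booke.h definitions.
 */
static void guest_start_periodic_tick(unsigned long ticks)
{
	mtspr(SPRN_DECAR, ticks);		/* reload value */
	mtspr(SPRN_DEC, ticks);			/* first period */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE | TCR_DIE);
}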
1884
Bharat Bhushance11e482013-07-04 12:27:47 +05301885static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1886 uint64_t addr, int index)
1887{
1888 switch (index) {
1889 case 0:
1890 dbg_reg->dbcr0 |= DBCR0_IAC1;
1891 dbg_reg->iac1 = addr;
1892 break;
1893 case 1:
1894 dbg_reg->dbcr0 |= DBCR0_IAC2;
1895 dbg_reg->iac2 = addr;
1896 break;
1897#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1898 case 2:
1899 dbg_reg->dbcr0 |= DBCR0_IAC3;
1900 dbg_reg->iac3 = addr;
1901 break;
1902 case 3:
1903 dbg_reg->dbcr0 |= DBCR0_IAC4;
1904 dbg_reg->iac4 = addr;
1905 break;
1906#endif
1907 default:
1908 return -EINVAL;
1909 }
1910
1911 dbg_reg->dbcr0 |= DBCR0_IDM;
1912 return 0;
1913}
1914
1915static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1916 int type, int index)
1917{
1918 switch (index) {
1919 case 0:
1920 if (type & KVMPPC_DEBUG_WATCH_READ)
1921 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1922 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1923 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1924 dbg_reg->dac1 = addr;
1925 break;
1926 case 1:
1927 if (type & KVMPPC_DEBUG_WATCH_READ)
1928 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1929 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1930 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1931 dbg_reg->dac2 = addr;
1932 break;
1933 default:
1934 return -EINVAL;
1935 }
1936
1937 dbg_reg->dbcr0 |= DBCR0_IDM;
1938 return 0;
1939}
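
/*
 * Illustrative use of the two helpers above: slot indices map to
 * IAC1..IAC4 and DAC1/DAC2, and both helpers also set DBCR0_IDM. The
 * addresses here are arbitrary example values in a zeroed debug_reg set.
 */
static void example_arm_debug_regs(struct debug_reg *dbg)
{
	kvmppc_booke_add_breakpoint(dbg, 0xc0000000ULL, 0);	  /* IAC1 */
	kvmppc_booke_add_watchpoint(dbg, 0xc0100000ULL,
				    KVMPPC_DEBUG_WATCH_WRITE, 0); /* DAC1W */
}
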
1940void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1941{
1942 /* XXX: Add similar MSR protection for BookE-PR */
1943#ifdef CONFIG_KVM_BOOKE_HV
1944 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1945 if (set) {
1946 if (prot_bitmap & MSR_UCLE)
1947 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1948 if (prot_bitmap & MSR_DE)
1949 vcpu->arch.shadow_msrp |= MSRP_DEP;
1950 if (prot_bitmap & MSR_PMM)
1951 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1952 } else {
1953 if (prot_bitmap & MSR_UCLE)
1954 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1955 if (prot_bitmap & MSR_DE)
1956 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1957 if (prot_bitmap & MSR_PMM)
1958 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1959 }
1960#endif
1961}
1962
Alexander Graf7d15c06f2014-06-20 13:52:36 +02001963int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1964 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1965{
1966 int gtlb_index;
1967 gpa_t gpaddr;
1968
1969#ifdef CONFIG_KVM_E500V2
1970 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1971 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1972 pte->eaddr = eaddr;
1973 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1974 (eaddr & ~PAGE_MASK);
1975 pte->vpage = eaddr >> PAGE_SHIFT;
1976 pte->may_read = true;
1977 pte->may_write = true;
1978 pte->may_execute = true;
1979
1980 return 0;
1981 }
1982#endif
1983
1984 /* Check the guest TLB. */
1985 switch (xlid) {
1986 case XLATE_INST:
1987 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1988 break;
1989 case XLATE_DATA:
1990 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1991 break;
1992 default:
1993 BUG();
1994 }
1995
1996 /* Do we have a TLB entry at all? */
1997 if (gtlb_index < 0)
1998 return -ENOENT;
1999
2000 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
2001
2002 pte->eaddr = eaddr;
2003 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
2004 pte->vpage = eaddr >> PAGE_SHIFT;
2005
2006 /* XXX read permissions from the guest TLB */
2007 pte->may_read = true;
2008 pte->may_write = true;
2009 pte->may_execute = true;
2010
2011 return 0;
2012}
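
/*
 * Sketch of a typical caller of kvmppc_xlate() (mirroring what the
 * kvmppc_ld()/kvmppc_st() paths do): translate a guest effective data
 * address for a read and hand back the guest-real address. XLATE_READ is
 * assumed from the xlate_readwrite enum in kvm_ppc.h.
 */
static int example_xlate_data_read(struct kvm_vcpu *vcpu, ulong eaddr,
				   gpa_t *gpaddr)
{
	struct kvmppc_pte pte;
	int r;

	r = kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte);
	if (r)
		return r;		/* -ENOENT: no guest TLB entry */

	if (!pte.may_read)
		return -EPERM;

	*gpaddr = pte.raddr;		/* page base | in-page offset */
	return 0;
}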
2013
Bharat Bhushance11e482013-07-04 12:27:47 +05302014int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2015 struct kvm_guest_debug *dbg)
2016{
2017 struct debug_reg *dbg_reg;
2018 int n, b = 0, w = 0;
Christoffer Dall66b56562017-12-04 21:35:33 +01002019 int ret = 0;
2020
2021 vcpu_load(vcpu);
Bharat Bhushance11e482013-07-04 12:27:47 +05302022
2023 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
Bharat Bhushan348ba712014-08-06 12:08:55 +05302024 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05302025 vcpu->guest_debug = 0;
2026 kvm_guest_protect_msr(vcpu, MSR_DE, false);
Christoffer Dall66b56562017-12-04 21:35:33 +01002027 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302028 }
2029
2030 kvm_guest_protect_msr(vcpu, MSR_DE, true);
2031 vcpu->guest_debug = dbg->control;
Bharat Bhushan348ba712014-08-06 12:08:55 +05302032 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05302033
2034 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
Bharat Bhushan348ba712014-08-06 12:08:55 +05302035 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Bharat Bhushance11e482013-07-04 12:27:47 +05302036
 2037 /* Code below handles only HW breakpoints and watchpoints */
Bharat Bhushan348ba712014-08-06 12:08:55 +05302038 dbg_reg = &(vcpu->arch.dbg_reg);
Bharat Bhushance11e482013-07-04 12:27:47 +05302039
2040#ifdef CONFIG_KVM_BOOKE_HV
2041 /*
 2042 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
 2043 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
2044 */
2045 dbg_reg->dbcr1 = 0;
2046 dbg_reg->dbcr2 = 0;
2047#else
2048 /*
 2049 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
2050 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
2051 * is set.
2052 */
2053 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
2054 DBCR1_IAC4US;
2055 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
2056#endif
2057
2058 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
Christoffer Dall66b56562017-12-04 21:35:33 +01002059 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302060
Christoffer Dall66b56562017-12-04 21:35:33 +01002061 ret = -EINVAL;
Bharat Bhushance11e482013-07-04 12:27:47 +05302062 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
2063 uint64_t addr = dbg->arch.bp[n].addr;
2064 uint32_t type = dbg->arch.bp[n].type;
2065
2066 if (type == KVMPPC_DEBUG_NONE)
2067 continue;
2068
Dan Carpenterac0e89b2016-07-14 13:15:46 +03002069 if (type & ~(KVMPPC_DEBUG_WATCH_READ |
Bharat Bhushance11e482013-07-04 12:27:47 +05302070 KVMPPC_DEBUG_WATCH_WRITE |
2071 KVMPPC_DEBUG_BREAKPOINT))
Christoffer Dall66b56562017-12-04 21:35:33 +01002072 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302073
2074 if (type & KVMPPC_DEBUG_BREAKPOINT) {
2075 /* Setting H/W breakpoint */
2076 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
Christoffer Dall66b56562017-12-04 21:35:33 +01002077 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302078 } else {
2079 /* Setting H/W watchpoint */
2080 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
2081 type, w++))
Christoffer Dall66b56562017-12-04 21:35:33 +01002082 goto out;
Bharat Bhushance11e482013-07-04 12:27:47 +05302083 }
2084 }
2085
Christoffer Dall66b56562017-12-04 21:35:33 +01002086 ret = 0;
2087out:
2088 vcpu_put(vcpu);
2089 return ret;
Bharat Bhushance11e482013-07-04 12:27:47 +05302090}
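
/*
 * Userspace sketch of driving this ioctl: arm a single hardware
 * instruction breakpoint. Unused bp[] slots stay KVMPPC_DEBUG_NONE (zero).
 * Assumes <string.h>, <stdint.h>, <sys/ioctl.h> and <linux/kvm.h>; not
 * part of this file.
 */
static int set_hw_breakpoint(int vcpu_fd, uint64_t addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.bp[0].addr = addr;
	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}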
2091
Scott Wood94fa9d92011-12-20 15:34:22 +00002092void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2093{
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002094 vcpu->cpu = smp_processor_id();
Scott Woodd30f6e42011-12-20 15:34:43 +00002095 current->thread.kvm_vcpu = vcpu;
Scott Wood94fa9d92011-12-20 15:34:22 +00002096}
2097
2098void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2099{
Scott Woodd30f6e42011-12-20 15:34:43 +00002100 current->thread.kvm_vcpu = NULL;
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002101 vcpu->cpu = -1;
Bharat Bhushance11e482013-07-04 12:27:47 +05302102
2103 /* Clear pending debug event in DBSR */
2104 kvmppc_clear_dbsr();
Scott Wood94fa9d92011-12-20 15:34:22 +00002105}
2106
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302107void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
2108{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302109 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302110}
2111
2112int kvmppc_core_init_vm(struct kvm *kvm)
2113{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302114 return kvm->arch.kvm_ops->init_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302115}
2116
2117struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
2118{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302119 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302120}
2121
2122void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2123{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302124 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302125}
2126
2127void kvmppc_core_destroy_vm(struct kvm *kvm)
2128{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302129 kvm->arch.kvm_ops->destroy_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302130}
2131
2132void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2133{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302134 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302135}
2136
2137void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2138{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302139 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002140}
2141
2142int __init kvmppc_booke_init(void)
2143{
Scott Woodd30f6e42011-12-20 15:34:43 +00002144#ifndef CONFIG_KVM_BOOKE_HV
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002145 unsigned long ivor[16];
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002146 unsigned long *handler = kvmppc_booke_handler_addr;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002147 unsigned long max_ivor = 0;
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002148 unsigned long handler_len;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002149 int i;
2150
 2151 /* We install our own exception handlers by hijacking IVPR. IVPR supplies
 2152 * only the top 16 address bits, so the handlers need a 64KB-aligned 64KB
 2153 * allocation. */
2153 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
2154 VCPU_SIZE_ORDER);
2155 if (!kvmppc_booke_handlers)
2156 return -ENOMEM;
2157
2158 /* XXX make sure our handlers are smaller than Linux's */
2159
2160 /* Copy our interrupt handlers to match host IVORs. That way we don't
2161 * have to swap the IVORs on every guest/host transition. */
2162 ivor[0] = mfspr(SPRN_IVOR0);
2163 ivor[1] = mfspr(SPRN_IVOR1);
2164 ivor[2] = mfspr(SPRN_IVOR2);
2165 ivor[3] = mfspr(SPRN_IVOR3);
2166 ivor[4] = mfspr(SPRN_IVOR4);
2167 ivor[5] = mfspr(SPRN_IVOR5);
2168 ivor[6] = mfspr(SPRN_IVOR6);
2169 ivor[7] = mfspr(SPRN_IVOR7);
2170 ivor[8] = mfspr(SPRN_IVOR8);
2171 ivor[9] = mfspr(SPRN_IVOR9);
2172 ivor[10] = mfspr(SPRN_IVOR10);
2173 ivor[11] = mfspr(SPRN_IVOR11);
2174 ivor[12] = mfspr(SPRN_IVOR12);
2175 ivor[13] = mfspr(SPRN_IVOR13);
2176 ivor[14] = mfspr(SPRN_IVOR14);
2177 ivor[15] = mfspr(SPRN_IVOR15);
2178
2179 for (i = 0; i < 16; i++) {
 2180 if (ivor[i] > ivor[max_ivor])
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002181 max_ivor = i;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002182
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002183 handler_len = handler[i + 1] - handler[i];
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002184 memcpy((void *)kvmppc_booke_handlers + ivor[i],
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002185 (void *)handler[i], handler_len);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002186 }
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002187
2188 handler_len = handler[max_ivor + 1] - handler[max_ivor];
2189 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
2190 ivor[max_ivor] + handler_len);
Scott Woodd30f6e42011-12-20 15:34:43 +00002191#endif /* !BOOKE_HV */
Hollis Blancharddb93f572008-11-05 09:36:18 -06002192 return 0;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002193}
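
/*
 * Net effect of kvmppc_booke_init() above, sketched: one 64KB-aligned page
 * holds copies of the KVM exception handlers at exactly the host IVOR
 * offsets, so guest entry only has to repoint IVPR at
 * kvmppc_booke_handlers.
 *
 *	kvmppc_booke_handlers (64KB aligned)
 *	  +ivor[0]	copy of KVM handler for IVOR0 (critical input)
 *	  +ivor[1]	copy of KVM handler for IVOR1 (machine check)
 *	  ...
 *	  +ivor[15]	copy of KVM handler for IVOR15 (debug)
 *
 * flush_icache_range() covers up to the end of the handler at the highest
 * offset, ivor[max_ivor].
 */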
2194
Hollis Blancharddb93f572008-11-05 09:36:18 -06002195void __exit kvmppc_booke_exit(void)
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002196{
2197 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
2198 kvm_exit();
2199}