blob: e5f8ba793c31192a571f2b703ad648233b8af376 [file] [log] [blame]
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
23
24#include <linux/errno.h>
25#include <linux/err.h>
26#include <linux/kvm_host.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/gfp.h>
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -050028#include <linux/module.h>
29#include <linux/vmalloc.h>
30#include <linux/fs.h>
Hollis Blanchard7924bd42008-12-02 15:51:55 -060031
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -050032#include <asm/cputable.h>
33#include <asm/uaccess.h>
34#include <asm/kvm_ppc.h>
Hollis Blanchardd9fbd032008-11-05 09:36:13 -060035#include <asm/cacheflush.h>
Scott Woodd30f6e42011-12-20 15:34:43 +000036#include <asm/dbell.h>
37#include <asm/hw_irq.h>
38#include <asm/irq.h>
Mihai Caramanb50df192012-10-11 06:13:19 +000039#include <asm/time.h>
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -050040
Scott Woodd30f6e42011-12-20 15:34:43 +000041#include "timing.h"
Hollis Blanchard75f74f02008-11-05 09:36:16 -060042#include "booke.h"
Alexander Graf97c95052012-08-02 15:10:00 +020043#include "trace.h"
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -050044
/* Base address of the exception-handler trampolines installed at init time. */
unsigned long kvmppc_booke_handlers;

/* Helpers expanding to the (offset, type) pair kvm_stats_debugfs_item wants. */
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Exit/event counters exported under debugfs; table is NULL-terminated. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};
70
/* TODO: use vcpu_printf() */
/*
 * Dump guest-visible vcpu register state to the kernel log: PC/MSR,
 * LR/CTR, SRR0/SRR1, the pending-exception bitmap, and all 32 GPRs
 * (four per line).  Debugging aid only.
 */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
91
#ifdef CONFIG_SPE
/*
 * Give the SPE unit back to the host: save the guest's SPE state and
 * clear MSR_SPE in the shadow MSR so guest SPE use will trap again.
 * preempt is disabled so the register state cannot migrate mid-save.
 */
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

/*
 * Hand the SPE unit to the guest: load the guest's SPE state and set
 * MSR_SPE in the shadow MSR so the guest can use SPE without trapping.
 */
static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

/*
 * Reconcile the shadow MSR's SPE bit with the guest's desired MSR_SPE,
 * loading or saving SPE state only when the two disagree.
 */
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
/* No SPE support configured: nothing to synchronize. */
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
125
/*
 * Mirror the guest's MSR_FP into the shadow MSR (non-HV with FPU only).
 */
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}
135
/*
 * Propagate the guest's MSR_DE preference into the effective MSR, and
 * force-enable debug interrupts while userspace (gdbstub) is debugging
 * the guest via vcpu->guest_debug.
 */
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}
158
/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 *
 * Stores the new guest MSR (forcing MSR_GS on HV so the guest stays in
 * guest state), then re-syncs everything derived from MSR bits: MMU
 * mode, SPE, FPU and debug shadow state.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}
178
/*
 * Mark an interrupt of the given priority pending for the vcpu; actual
 * delivery happens later in kvmppc_booke_irqprio_deliver().
 */
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

/* Queue a DTLB-miss interrupt, latching DEAR/ESR for delivery time. */
static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

/* Queue a data-storage interrupt, latching DEAR/ESR for delivery time. */
static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

/* Queue an instruction-storage interrupt, latching ESR for delivery time. */
static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

/* Queue an alignment interrupt, latching DEAR/ESR for delivery time. */
static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

/* Queue a program interrupt (e.g. illegal instruction), latching ESR. */
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
222
/* Queue a decrementer interrupt for the vcpu. */
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

/* Return nonzero if a decrementer interrupt is pending. */
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

/* Cancel a pending decrementer interrupt. */
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

/*
 * Queue an external interrupt.  KVM_INTERRUPT_SET_LEVEL requests
 * level-triggered semantics, which map to the separate
 * EXTERNAL_LEVEL priority (the irq stays asserted until dequeued).
 */
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

/* Cancel any pending external interrupt, edge or level variant. */
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

/* Queue a watchdog (critical-class) interrupt. */
static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

/* Cancel a pending watchdog interrupt. */
static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
264
/*
 * Accessors for guest save/restore and fault registers.  On
 * CONFIG_KVM_BOOKE_HV these live in hardware guest SPRs (GSRR0/GSRR1,
 * GDEAR, GESR, GEPR); otherwise they live in the shared page or
 * vcpu->arch fields.
 */

/* Set guest SRR0/SRR1 (non-critical interrupt save/restore pair). */
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

/* Set guest CSRR0/CSRR1 (critical interrupt save/restore pair). */
static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

/*
 * Set guest DSRR0/DSRR1 when the core has a separate debug save/restore
 * level; otherwise debug interrupts use the critical pair.
 */
static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

/* Set guest MCSRR0/MCSRR1 (machine-check save/restore pair). */
static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Read the guest's data exception address register (DEAR). */
static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

/* Write the guest's data exception address register (DEAR). */
static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

/* Read the guest's exception syndrome register (ESR). */
static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

/* Write the guest's exception syndrome register (ESR). */
static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

/* Read the guest's external proxy register (EPR). */
static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}
342
/* Deliver the interrupt of the corresponding priority, if possible. */
/*
 * Returns nonzero if the interrupt was actually delivered (and, unless
 * keep_irq, cleared from the pending set).  Delivery means: save
 * PC/MSR into the save/restore pair matching the interrupt class,
 * update DEAR/ESR/EPR as appropriate for the priority, redirect the
 * guest PC to IVPR|IVOR[priority], and mask MSR bits per the class.
 */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	/* Level-triggered externals deliver as EXTERNAL but stay pending. */
	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	/*
	 * Classify the priority: which side registers to latch
	 * (dear/esr), whether delivery is currently allowed, which MSR
	 * bits survive delivery, and which save/restore class to use.
	 */
	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		/* Synchronous exceptions are always deliverable. */
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		/* Critical-class: gated on MSR_CE and the crit section. */
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		/* Timer sources stay pending until explicitly dequeued. */
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		/* Non-critical async: gated on MSR_EE and the crit section. */
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		/* Save return PC/MSR in the class's save/restore pair. */
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		/* Vector into the guest's handler for this priority. */
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		/* Keep 64-bit mode if the guest asked for it via EPCR. */
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
490
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	/* Watchdog period selects which timebase bit triggers it. */
	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	/* Round up to the next full jiffy if there is a remainder. */
	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
521
/*
 * (Re)arm the host-side timer that emulates the guest watchdog, or stop
 * it when the next expiry is too far out for the timer API.  Takes
 * wdt_lock to serialize against concurrent TSR/TCR updates.
 */
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
546
/*
 * Host timer callback emulating the guest watchdog state machine:
 * first expiry sets TSR_ENW, second sets TSR_WIS (raising a guest
 * interrupt), third ("final") expiry exits to userspace if TCR_WRC
 * requests an action.  TSR is updated via a cmpxchg loop since guest
 * emulation may race with this callback.
 */
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
594
/*
 * Reconcile pending decrementer/watchdog interrupts with the current
 * TCR (enable) and TSR (status) bits: queue when both enable and
 * status are set, dequeue otherwise.
 */
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}
607
/*
 * Walk the pending-exception bitmap from highest priority (lowest bit)
 * and deliver the first interrupt the guest can currently take, then
 * publish the remaining pending status to the shared page.
 *
 * NOTE(review): __ffs() is undefined for a zero argument; this relies
 * on the `priority < BOOKE_IRQPRIO_MAX` guard when nothing is pending —
 * confirm against callers.
 */
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
626
/* Check pending exceptions and deliver one, if possible. */
/*
 * Called with interrupts disabled just before guest entry.  Returns
 * nonzero if the caller must restart the entry sequence (a request was
 * raised during delivery, or we blocked on guest MSR_WE wait-state).
 */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		/* Guest is idle (wait-enable): block until woken. */
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}
652
/*
 * Handle outstanding vcpu requests before guest entry.  Returns 1 to
 * continue into the guest, 0 to exit to userspace (run->exit_reason
 * already set).
 */
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	/* Final watchdog expiry: let userspace decide the action. */
	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	/* Userspace-managed EPR: ask userspace for the interrupt vector. */
	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}
678
/*
 * Main vcpu-run entry for Book E: prepare for entry, swap host FPU and
 * debug state for the guest's, run the guest via __kvmppc_vcpu_run(),
 * then restore host state.  Returns a RESUME_*/errno-style value from
 * the exit path (or -EINVAL for an unsane vcpu).
 */
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct thread_struct thread;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		/* Entry aborted (signal/request); bail out to caller. */
		local_irq_enable();
		ret = s;
		goto out;
	}

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	/* Switch to guest debug context */
	thread.debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&thread);
	thread.debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&thread);
	current->thread.debug = thread.debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
760
/*
 * Run the instruction emulator for an emulation-triggering exit and
 * translate its result into a RESUME_* action.  EMULATE_FAIL injects a
 * program interrupt into the guest and reports the failing opcode to
 * userspace via hw.hardware_exit_reason.
 */
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
795
Bharat Bhushance11e482013-07-04 12:27:47 +0530796static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
797{
798 struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
799 u32 dbsr = vcpu->arch.dbsr;
800
801 run->debug.arch.status = 0;
802 run->debug.arch.address = vcpu->arch.pc;
803
804 if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
805 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
806 } else {
807 if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
808 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
809 else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
810 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
811 if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
812 run->debug.arch.address = dbg_reg->dac1;
813 else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
814 run->debug.arch.address = dbg_reg->dac2;
815 }
816
817 return RESUME_HOST;
818}
819
/*
 * Build a minimal pt_regs snapshot of the *current host* context so a
 * host exception handler can be invoked from C. Only the stack pointer,
 * instruction pointer, MSR and link register are populated; everything
 * else is zeroed.
 */
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));		/* r1 = current stack pointer */
	asm("mflr %0" : "=r"(lr));		/* caller's link register */
	asm("mfmsr %0" : "=r"(msr));		/* current machine state */
	/* Branch-and-link to the next instruction so LR (copied into ip)
	 * holds the address of this very spot. */
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
835
/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly the same) as it would be called from the low level
 * handler (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
Alexander Graf4e642cc2012-02-20 23:57:26 +0100842static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
843 unsigned int exit_nr)
844{
845 struct pt_regs regs;
846
847 switch (exit_nr) {
848 case BOOKE_INTERRUPT_EXTERNAL:
849 kvmppc_fill_pt_regs(&regs);
850 do_IRQ(&regs);
851 break;
852 case BOOKE_INTERRUPT_DECREMENTER:
853 kvmppc_fill_pt_regs(&regs);
854 timer_interrupt(&regs);
855 break;
Tiejun Chen5f17ce82013-05-13 10:00:45 +0800856#if defined(CONFIG_PPC_DOORBELL)
Alexander Graf4e642cc2012-02-20 23:57:26 +0100857 case BOOKE_INTERRUPT_DOORBELL:
858 kvmppc_fill_pt_regs(&regs);
859 doorbell_exception(&regs);
860 break;
861#endif
862 case BOOKE_INTERRUPT_MACHINE_CHECK:
863 /* FIXME */
864 break;
Alexander Graf7cc1e8e2012-02-22 16:26:34 +0100865 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
866 kvmppc_fill_pt_regs(&regs);
867 performance_monitor_exception(&regs);
868 break;
Bharat Bhushan6328e592012-06-20 05:56:53 +0000869 case BOOKE_INTERRUPT_WATCHDOG:
870 kvmppc_fill_pt_regs(&regs);
871#ifdef CONFIG_BOOKE_WDT
872 WatchdogException(&regs);
873#else
874 unknown_exception(&regs);
875#endif
876 break;
877 case BOOKE_INTERRUPT_CRITICAL:
878 unknown_exception(&regs);
879 break;
Bharat Bhushance11e482013-07-04 12:27:47 +0530880 case BOOKE_INTERRUPT_DEBUG:
881 /* Save DBSR before preemption is enabled */
882 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
883 kvmppc_clear_dbsr();
884 break;
Alexander Graf4e642cc2012-02-20 23:57:26 +0100885 }
886}
887
/**
 * kvmppc_handle_exit
 *
 * Main exit dispatcher: called after the guest traps back into the host.
 * Re-delivers host-bound interrupts, accounts the exit, handles the exit
 * reason (emulation, TLB miss, queued guest exceptions, MMIO, debug), and
 * decides whether to re-enter the guest or return to userspace.
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;	/* SRCU read-side index for memslot access below */

#ifdef CONFIG_PPC64
	WARN_ON(local_paca->irq_happened != 0);
#endif

	/*
	 * We enter with interrupts disabled in hardware, but
	 * we need to call hard_irq_disable anyway to ensure that
	 * the software state is kept in sync.
	 */
	hard_irq_disable();

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	/* Defaults; individual cases override exit_reason as needed. */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		/* Hypervisor-privileged instruction: emulate it. */
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		/* Trap from guest kernel mode: attempt emulation. */
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
		                            vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		/* Fast path for the paravirt magic page on e500v2. */
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		/* Memslot lookups below require the SRCU read lock. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		/* Memslot lookups below require the SRCU read lock. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			/* Signal pending or similar: exit to userspace. */
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}
1231
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001232static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1233{
1234 u32 old_tsr = vcpu->arch.tsr;
1235
1236 vcpu->arch.tsr = new_tsr;
1237
1238 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1239 arm_next_watchdog(vcpu);
1240
1241 update_timer_ints(vcpu);
1242}
1243
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	/* Non-HV: start the guest in a translated, user-mode shadow state. */
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	/* Core-specific setup; sanity check may mark the vcpu unrunnable. */
	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
1273
/* One-time per-vcpu subarch init: prepare the watchdog timer and lock. */
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}
1283
/* Per-vcpu subarch teardown: stop the watchdog timer, waiting if running. */
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}
1288
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001289int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1290{
1291 int i;
1292
1293 regs->pc = vcpu->arch.pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001294 regs->cr = kvmppc_get_cr(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001295 regs->ctr = vcpu->arch.ctr;
1296 regs->lr = vcpu->arch.lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001297 regs->xer = kvmppc_get_xer(vcpu);
Alexander Graf666e7252010-07-29 14:47:43 +02001298 regs->msr = vcpu->arch.shared->msr;
Alexander Grafde7906c2010-07-29 14:47:46 +02001299 regs->srr0 = vcpu->arch.shared->srr0;
1300 regs->srr1 = vcpu->arch.shared->srr1;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001301 regs->pid = vcpu->arch.pid;
Alexander Grafa73a9592010-07-29 14:47:47 +02001302 regs->sprg0 = vcpu->arch.shared->sprg0;
1303 regs->sprg1 = vcpu->arch.shared->sprg1;
1304 regs->sprg2 = vcpu->arch.shared->sprg2;
1305 regs->sprg3 = vcpu->arch.shared->sprg3;
Scott Woodb5904972011-11-08 18:23:30 -06001306 regs->sprg4 = vcpu->arch.shared->sprg4;
1307 regs->sprg5 = vcpu->arch.shared->sprg5;
1308 regs->sprg6 = vcpu->arch.shared->sprg6;
1309 regs->sprg7 = vcpu->arch.shared->sprg7;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001310
1311 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001312 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001313
1314 return 0;
1315}
1316
/*
 * Load the vcpu's general register state from userspace's kvm_regs.
 * CR/XER/MSR/PID go through accessors because setting them has side
 * effects beyond a plain field store.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	/* General-purpose registers. */
	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
1344
Scott Wood5ce941e2011-04-27 17:24:21 -05001345static void get_sregs_base(struct kvm_vcpu *vcpu,
1346 struct kvm_sregs *sregs)
1347{
1348 u64 tb = get_tb();
1349
1350 sregs->u.e.features |= KVM_SREGS_E_BASE;
1351
1352 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1353 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1354 sregs->u.e.mcsr = vcpu->arch.mcsr;
Scott Woodd30f6e42011-12-20 15:34:43 +00001355 sregs->u.e.esr = get_guest_esr(vcpu);
1356 sregs->u.e.dear = get_guest_dear(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001357 sregs->u.e.tsr = vcpu->arch.tsr;
1358 sregs->u.e.tcr = vcpu->arch.tcr;
1359 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1360 sregs->u.e.tb = tb;
1361 sregs->u.e.vrsave = vcpu->arch.vrsave;
1362}
1363
/*
 * Apply the KVM_SREGS_E_BASE portion of @sregs to the vcpu. DEC and TSR
 * are only touched when the corresponding update_special flag is set,
 * and TCR is applied before them so timer re-evaluation sees it.
 */
static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}
1388
1389static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1390 struct kvm_sregs *sregs)
1391{
1392 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1393
Scott Wood841741f2011-09-02 17:39:37 -05001394 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001395 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1396 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1397 sregs->u.e.decar = vcpu->arch.decar;
1398 sregs->u.e.ivpr = vcpu->arch.ivpr;
1399}
1400
1401static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1402 struct kvm_sregs *sregs)
1403{
1404 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1405 return 0;
1406
Scott Wood841741f2011-09-02 17:39:37 -05001407 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001408 return -EINVAL;
1409
1410 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1411 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1412 vcpu->arch.decar = sregs->u.e.decar;
1413 vcpu->arch.ivpr = sregs->u.e.ivpr;
1414
1415 return 0;
1416}
1417
/*
 * Export the 16 base IVORs to @sregs. The ivor_low[] index mapping to
 * BOOKE_IRQPRIO_* follows the Book E IVOR numbering.
 */
int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}
1440
/*
 * Import the 16 base IVORs from @sregs, if the IVOR feature flag is set.
 * Mirror of kvmppc_get_sregs_ivor(); same index mapping.
 */
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}
1465
/*
 * KVM_GET_SREGS: gather base + arch206 sregs here, then delegate the
 * remainder to the core-specific backend.
 */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return kvmppc_ops->get_sregs(vcpu, sregs);
}
1475
1476int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1477 struct kvm_sregs *sregs)
1478{
Scott Wood5ce941e2011-04-27 17:24:21 -05001479 int ret;
1480
1481 if (vcpu->arch.pvr != sregs->pvr)
1482 return -EINVAL;
1483
1484 ret = set_sregs_base(vcpu, sregs);
1485 if (ret < 0)
1486 return ret;
1487
1488 ret = set_sregs_arch206(vcpu, sregs);
1489 if (ret < 0)
1490 return ret;
1491
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301492 return kvmppc_ops->set_sregs(vcpu, sregs);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001493}
1494
/*
 * KVM_GET_ONE_REG: read the single register named by reg->id and copy
 * its value to the user buffer at reg->addr.
 *
 * Debug, timer and EPR/EPCR registers are handled here; anything else
 * is forwarded to the active backend via kvmppc_ops->get_one_reg().
 *
 * Returns 0 on success, -EINVAL if the id encodes a size larger than
 * the transfer union, -EFAULT if the user buffer is unwritable, or the
 * backend's error for unrecognized ids.
 */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	/* The register size is encoded in the id itself. */
	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	/* IAC3/IAC4 exist only on cores with more than two IACs. */
	case KVM_REG_PPC_IAC3:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		/* EPR may live in hardware; fetch through the accessor. */
		u32 epr = get_guest_epr(vcpu);
		val = get_reg_val(reg->id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		val = get_reg_val(reg->id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		val = get_reg_val(reg->id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		val = get_reg_val(reg->id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		/* Software breakpoint instruction userspace should plant. */
		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
	default:
		/* Not a generic BookE register; ask the backend. */
		r = kvmppc_ops->get_one_reg(vcpu, reg->id, &val);
		break;
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
1561
/*
 * KVM_SET_ONE_REG: copy a register value in from user space and write
 * it to the register named by reg->id.
 *
 * Timer registers go through the kvmppc_set_* helpers so side effects
 * (watchdog re-arm, interrupt re-evaluation) are applied; unrecognized
 * ids are forwarded to the backend via kvmppc_ops->set_one_reg().
 *
 * Returns 0 on success, -EINVAL for an oversized id, -EFAULT if the
 * user buffer is unreadable, or the backend's error.
 */
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	/* The register size is encoded in the id itself. */
	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	/* IAC3/IAC4 exist only on cores with more than two IACs. */
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		/* OR the given bits into TSR (may raise a timer irq). */
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		/* Clear the given TSR bits (may re-arm the watchdog). */
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		/* Not a generic BookE register; ask the backend. */
		r = kvmppc_ops->set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}
1638
/* KVM_GET_FPU is not supported on BookE. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
1643
/* KVM_SET_FPU is not supported on BookE. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
1648
/*
 * KVM_TRANSLATE: translate a guest address for userspace.  This is a
 * thin wrapper around the core translation helper; the intermediate
 * local the original routed the result through added nothing, so the
 * call's result is returned directly.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001657
/* Dirty page logging is not supported on BookE. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}
1662
/* BookE keeps no per-memslot arch state, so there is nothing to free. */
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}
1667
/* BookE keeps no per-memslot arch state; creation always succeeds. */
int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}
1673
/* No arch-specific preparation is needed for a memory region on BookE. */
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}
1680
/* No arch-specific commit work is needed for a memory region on BookE. */
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}
1686
/* Nothing to flush per-memslot on BookE. */
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
1690
/*
 * Update the guest's EPCR (64-bit only).  Under BOOKE_HV, mirror the
 * guest's ICM bit into the shadow EPCR's GICM bit so hardware applies
 * the guest's chosen interrupt computation mode.
 */
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}
1702
/*
 * Set the guest TCR, then re-arm the emulated watchdog and re-evaluate
 * pending timer interrupts — both depend on the new TCR bits.
 */
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}
1709
/*
 * OR @tsr_bits into the guest TSR, then request timer processing and
 * kick the vcpu so a newly pending timer interrupt is noticed.
 */
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	/* Publish the TSR update before the request/kick is observed. */
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}
1717
/*
 * Clear @tsr_bits in the guest TSR, re-arming the watchdog if its
 * status bits were cleared, and re-evaluate pending timer interrupts.
 */
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}
1731
/*
 * Timer callback for the emulated decrementer.  With auto-reload
 * enabled (TCR[ARE]), reload DEC from DECAR and restart emulation,
 * then raise the decrementer interrupt status via TSR[DIS].
 */
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
1743
/*
 * Program hardware instruction breakpoint slot @index (IAC1..IAC4) in
 * @dbg_reg to trigger at @addr, and enable internal debug mode (IDM).
 * Returns -EINVAL for a slot beyond what this core provides.
 */
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	/* Slots 2 and 3 only exist on cores with more than two IACs. */
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
1773
/*
 * Program hardware data watchpoint slot @index (DAC1/DAC2) in @dbg_reg
 * to trigger at @addr for the requested access @type (read and/or
 * write), and enable internal debug mode (IDM).
 * Returns -EINVAL for a slot other than 0 or 1.
 */
static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
1799void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1800{
1801 /* XXX: Add similar MSR protection for BookE-PR */
1802#ifdef CONFIG_KVM_BOOKE_HV
1803 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1804 if (set) {
1805 if (prot_bitmap & MSR_UCLE)
1806 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1807 if (prot_bitmap & MSR_DE)
1808 vcpu->arch.shadow_msrp |= MSRP_DEP;
1809 if (prot_bitmap & MSR_PMM)
1810 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1811 } else {
1812 if (prot_bitmap & MSR_UCLE)
1813 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1814 if (prot_bitmap & MSR_DE)
1815 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1816 if (prot_bitmap & MSR_PMM)
1817 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1818 }
1819#endif
1820}
1821
1822int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1823 struct kvm_guest_debug *dbg)
1824{
1825 struct debug_reg *dbg_reg;
1826 int n, b = 0, w = 0;
1827
1828 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
1829 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1830 vcpu->guest_debug = 0;
1831 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1832 return 0;
1833 }
1834
1835 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1836 vcpu->guest_debug = dbg->control;
1837 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1838 /* Set DBCR0_EDM in guest visible DBCR0 register. */
1839 vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
1840
1841 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1842 vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1843
1844 /* Code below handles only HW breakpoints */
1845 dbg_reg = &(vcpu->arch.shadow_dbg_reg);
1846
1847#ifdef CONFIG_KVM_BOOKE_HV
1848 /*
1849 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
1850 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
1851 */
1852 dbg_reg->dbcr1 = 0;
1853 dbg_reg->dbcr2 = 0;
1854#else
1855 /*
1856 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
1857 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
1858 * is set.
1859 */
1860 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
1861 DBCR1_IAC4US;
1862 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
1863#endif
1864
1865 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1866 return 0;
1867
1868 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
1869 uint64_t addr = dbg->arch.bp[n].addr;
1870 uint32_t type = dbg->arch.bp[n].type;
1871
1872 if (type == KVMPPC_DEBUG_NONE)
1873 continue;
1874
1875 if (type & !(KVMPPC_DEBUG_WATCH_READ |
1876 KVMPPC_DEBUG_WATCH_WRITE |
1877 KVMPPC_DEBUG_BREAKPOINT))
1878 return -EINVAL;
1879
1880 if (type & KVMPPC_DEBUG_BREAKPOINT) {
1881 /* Setting H/W breakpoint */
1882 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
1883 return -EINVAL;
1884 } else {
1885 /* Setting H/W watchpoint */
1886 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
1887 type, w++))
1888 return -EINVAL;
1889 }
1890 }
1891
1892 return 0;
1893}
1894
/* Record which physical CPU this vcpu now runs on and publish the vcpu
 * in the current thread so exception handlers can find it. */
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}
1900
/* Detach the vcpu from the current thread/CPU and clear any debug
 * status left behind in DBSR. */
void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}
1909
/* Tear down per-vcpu MMU state via the active backend. */
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_ops->mmu_destroy(vcpu);
}
1914
/* VM-creation hook: delegate to the active backend. */
int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvmppc_ops->init_vm(kvm);
}
1919
/* vcpu-creation hook: delegate to the active backend. */
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvmppc_ops->vcpu_create(kvm, id);
}
1924
/* vcpu-destruction hook: delegate to the active backend. */
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmppc_ops->vcpu_free(vcpu);
}
1929
/* VM-destruction hook: delegate to the active backend. */
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvmppc_ops->destroy_vm(kvm);
}
1934
/* Scheduling-in hook: delegate to the active backend. */
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_ops->vcpu_load(vcpu, cpu);
}
1939
/* Scheduling-out hook: delegate to the active backend. */
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_ops->vcpu_put(vcpu);
}
1944
Stephen Rothwell2986b8c2009-06-02 11:46:14 +10001945int __init kvmppc_booke_init(void)
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001946{
Scott Woodd30f6e42011-12-20 15:34:43 +00001947#ifndef CONFIG_KVM_BOOKE_HV
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001948 unsigned long ivor[16];
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001949 unsigned long *handler = kvmppc_booke_handler_addr;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001950 unsigned long max_ivor = 0;
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001951 unsigned long handler_len;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001952 int i;
1953
1954 /* We install our own exception handlers by hijacking IVPR. IVPR must
1955 * be 16-bit aligned, so we need a 64KB allocation. */
1956 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
1957 VCPU_SIZE_ORDER);
1958 if (!kvmppc_booke_handlers)
1959 return -ENOMEM;
1960
1961 /* XXX make sure our handlers are smaller than Linux's */
1962
1963 /* Copy our interrupt handlers to match host IVORs. That way we don't
1964 * have to swap the IVORs on every guest/host transition. */
1965 ivor[0] = mfspr(SPRN_IVOR0);
1966 ivor[1] = mfspr(SPRN_IVOR1);
1967 ivor[2] = mfspr(SPRN_IVOR2);
1968 ivor[3] = mfspr(SPRN_IVOR3);
1969 ivor[4] = mfspr(SPRN_IVOR4);
1970 ivor[5] = mfspr(SPRN_IVOR5);
1971 ivor[6] = mfspr(SPRN_IVOR6);
1972 ivor[7] = mfspr(SPRN_IVOR7);
1973 ivor[8] = mfspr(SPRN_IVOR8);
1974 ivor[9] = mfspr(SPRN_IVOR9);
1975 ivor[10] = mfspr(SPRN_IVOR10);
1976 ivor[11] = mfspr(SPRN_IVOR11);
1977 ivor[12] = mfspr(SPRN_IVOR12);
1978 ivor[13] = mfspr(SPRN_IVOR13);
1979 ivor[14] = mfspr(SPRN_IVOR14);
1980 ivor[15] = mfspr(SPRN_IVOR15);
1981
1982 for (i = 0; i < 16; i++) {
1983 if (ivor[i] > max_ivor)
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001984 max_ivor = i;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001985
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001986 handler_len = handler[i + 1] - handler[i];
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001987 memcpy((void *)kvmppc_booke_handlers + ivor[i],
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001988 (void *)handler[i], handler_len);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001989 }
Bharat Bhushan1d542d92013-01-15 22:24:39 +00001990
1991 handler_len = handler[max_ivor + 1] - handler[max_ivor];
1992 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
1993 ivor[max_ivor] + handler_len);
Scott Woodd30f6e42011-12-20 15:34:43 +00001994#endif /* !BOOKE_HV */
Hollis Blancharddb93f572008-11-05 09:36:18 -06001995 return 0;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001996}
1997
/* Module teardown: release the handler block allocated by
 * kvmppc_booke_init() and unregister from the generic KVM layer. */
void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}