/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

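/*
 * Helpers for the guest's exception save/restore state: with KVM_BOOKE_HV the
 * values live in the guest SPRs (GSRR0/1, GDEAR, GESR), otherwise they are
 * kept in vcpu->arch or the shared area.
 */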
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

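/*
 * Timer callback for the emulated watchdog: step TSR through the
 * enable-next-watchdog (ENW) and watchdog-interrupt (WIS) states and, on
 * final expiry, request an exit to userspace if a watchdog action is set.
 */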
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

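/* Reflect the current TCR/TSR state into pending decrementer and watchdog interrupts. */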
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

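/*
 * Deliver pending exceptions in priority order, then tell the guest (via the
 * shared area) whether anything is still pending.
 */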
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}
	kvmppc_lazy_ee_enable();

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();
	return ret;
}

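/*
 * Common handling for exits that require instruction emulation; EMULATE_FAIL
 * reports the offending instruction to userspace.
 */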
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

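/*
 * Capture a minimal snapshot of the current host context (stack pointer,
 * IP, MSR, LR) so host interrupt handlers can be called with a usable
 * pt_regs.
 */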
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handlers are called from here in a similar
 * (but not identical) way to how they are called from the low-level
 * handlers (such as arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
	            (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

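/*
 * KVM_SREGS_E_BASE: state common to all BookE cores, i.e. the critical
 * save/restore registers, MCSR, ESR/DEAR and the timer registers.
 */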
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		u32 old_tsr = vcpu->arch.tsr;

		vcpu->arch.tsr = sregs->u.e.tsr;

		if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
			arm_next_watchdog(vcpu);

		update_timer_ints(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		r = copy_to_user((u64 __user *)(long)reg->addr,
		                 &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_to_user((u64 __user *)(long)reg->addr,
		                 &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
		break;
	}
	default:
		break;
	}
	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
		                   (u64 __user *)(long)reg->addr, sizeof(u64));
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
		                   (u64 __user *)(long)reg->addr, sizeof(u64));
		break;
	}
	default:
		break;
	}
	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}