blob: 339b34a02fb8bde7b4a3dad72ceada710a675590 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
Heiko Carstensb0c632d2008-03-25 18:47:20 +010041#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020045 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010046 { "exit_validity", VCPU_STAT(exit_validity) },
47 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48 { "exit_external_request", VCPU_STAT(exit_external_request) },
49 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010050 { "exit_instruction", VCPU_STAT(exit_instruction) },
51 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020053 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010054 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010055 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010057 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020058 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010059 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020066 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010067 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68 { "instruction_spx", VCPU_STAT(instruction_spx) },
69 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70 { "instruction_stap", VCPU_STAT(instruction_stap) },
71 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010072 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010073 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020075 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010076 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020078 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010079 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010080 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020081 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010082 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010087 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010088 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020089 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010090 { NULL }
91};
92
Michael Mueller78c4b592013-07-26 15:04:04 +020093unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020094static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010095
Michael Mueller78c4b592013-07-26 15:04:04 +020096/* test availability of vfacility */
Heiko Carstens280ef0f2013-12-17 09:08:28 +010097int test_vfacility(unsigned long nr)
Michael Mueller78c4b592013-07-26 15:04:04 +020098{
99 return __test_facility(nr, (void *) vfacilities);
100}
101
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100102/* Section: not file related */
Alexander Graf10474ae2009-09-15 11:37:46 +0200103int kvm_arch_hardware_enable(void *garbage)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100104{
105 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200106 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100107}
108
109void kvm_arch_hardware_disable(void *garbage)
110{
111}
112
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200113static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
114
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115int kvm_arch_hardware_setup(void)
116{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200117 gmap_notifier.notifier_call = kvm_gmap_notifier;
118 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100119 return 0;
120}
121
122void kvm_arch_hardware_unsetup(void)
123{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200124 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100125}
126
127void kvm_arch_check_processor_compat(void *rtn)
128{
129}
130
131int kvm_arch_init(void *opaque)
132{
133 return 0;
134}
135
136void kvm_arch_exit(void)
137{
138}
139
140/* Section: device related */
141long kvm_arch_dev_ioctl(struct file *filp,
142 unsigned int ioctl, unsigned long arg)
143{
144 if (ioctl == KVM_S390_ENABLE_SIE)
145 return s390_enable_sie();
146 return -EINVAL;
147}
148
149int kvm_dev_ioctl_check_extension(long ext)
150{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100151 int r;
152
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200153 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100154 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200155 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100156 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100157#ifdef CONFIG_KVM_S390_UCONTROL
158 case KVM_CAP_S390_UCONTROL:
159#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200160 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100161 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200162 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100163 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100164 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200165 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100166 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200167 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200168 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200169 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200170 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200171 case KVM_CAP_MP_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100172 r = 1;
173 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200174 case KVM_CAP_NR_VCPUS:
175 case KVM_CAP_MAX_VCPUS:
176 r = KVM_MAX_VCPUS;
177 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100178 case KVM_CAP_NR_MEMSLOTS:
179 r = KVM_USER_MEM_SLOTS;
180 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200181 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100182 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200183 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200184 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100185 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200186 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100187 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100188}
189
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400190static void kvm_s390_sync_dirty_log(struct kvm *kvm,
191 struct kvm_memory_slot *memslot)
192{
193 gfn_t cur_gfn, last_gfn;
194 unsigned long address;
195 struct gmap *gmap = kvm->arch.gmap;
196
197 down_read(&gmap->mm->mmap_sem);
198 /* Loop over all guest pages */
199 last_gfn = memslot->base_gfn + memslot->npages;
200 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
201 address = gfn_to_hva_memslot(memslot, cur_gfn);
202
203 if (gmap_test_and_clear_dirty(address, gmap))
204 mark_page_dirty(kvm, cur_gfn);
205 }
206 up_read(&gmap->mm->mmap_sem);
207}
208
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100209/* Section: vm related */
210/*
211 * Get (and clear) the dirty memory log for a memory slot.
212 */
213int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
214 struct kvm_dirty_log *log)
215{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400216 int r;
217 unsigned long n;
218 struct kvm_memory_slot *memslot;
219 int is_dirty = 0;
220
221 mutex_lock(&kvm->slots_lock);
222
223 r = -EINVAL;
224 if (log->slot >= KVM_USER_MEM_SLOTS)
225 goto out;
226
227 memslot = id_to_memslot(kvm->memslots, log->slot);
228 r = -ENOENT;
229 if (!memslot->dirty_bitmap)
230 goto out;
231
232 kvm_s390_sync_dirty_log(kvm, memslot);
233 r = kvm_get_dirty_log(kvm, log, &is_dirty);
234 if (r)
235 goto out;
236
237 /* Clear the dirty log */
238 if (is_dirty) {
239 n = kvm_dirty_bitmap_bytes(memslot);
240 memset(memslot->dirty_bitmap, 0, n);
241 }
242 r = 0;
243out:
244 mutex_unlock(&kvm->slots_lock);
245 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100246}
247
Cornelia Huckd938dc52013-10-23 18:26:34 +0200248static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249{
250 int r;
251
252 if (cap->flags)
253 return -EINVAL;
254
255 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200256 case KVM_CAP_S390_IRQCHIP:
257 kvm->arch.use_irqchip = 1;
258 r = 0;
259 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200260 default:
261 r = -EINVAL;
262 break;
263 }
264 return r;
265}
266
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200267static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
268{
269 int ret;
270 unsigned int idx;
271 switch (attr->attr) {
272 case KVM_S390_VM_MEM_ENABLE_CMMA:
273 ret = -EBUSY;
274 mutex_lock(&kvm->lock);
275 if (atomic_read(&kvm->online_vcpus) == 0) {
276 kvm->arch.use_cmma = 1;
277 ret = 0;
278 }
279 mutex_unlock(&kvm->lock);
280 break;
281 case KVM_S390_VM_MEM_CLR_CMMA:
282 mutex_lock(&kvm->lock);
283 idx = srcu_read_lock(&kvm->srcu);
284 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
285 srcu_read_unlock(&kvm->srcu, idx);
286 mutex_unlock(&kvm->lock);
287 ret = 0;
288 break;
289 default:
290 ret = -ENXIO;
291 break;
292 }
293 return ret;
294}
295
Dominik Dingelf2061652014-04-09 13:13:00 +0200296static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
297{
298 int ret;
299
300 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200301 case KVM_S390_VM_MEM_CTRL:
302 ret = kvm_s390_mem_control(kvm, attr);
303 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200304 default:
305 ret = -ENXIO;
306 break;
307 }
308
309 return ret;
310}
311
312static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
313{
314 return -ENXIO;
315}
316
317static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
318{
319 int ret;
320
321 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200322 case KVM_S390_VM_MEM_CTRL:
323 switch (attr->attr) {
324 case KVM_S390_VM_MEM_ENABLE_CMMA:
325 case KVM_S390_VM_MEM_CLR_CMMA:
326 ret = 0;
327 break;
328 default:
329 ret = -ENXIO;
330 break;
331 }
332 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200333 default:
334 ret = -ENXIO;
335 break;
336 }
337
338 return ret;
339}
340
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100341long kvm_arch_vm_ioctl(struct file *filp,
342 unsigned int ioctl, unsigned long arg)
343{
344 struct kvm *kvm = filp->private_data;
345 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200346 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100347 int r;
348
349 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100350 case KVM_S390_INTERRUPT: {
351 struct kvm_s390_interrupt s390int;
352
353 r = -EFAULT;
354 if (copy_from_user(&s390int, argp, sizeof(s390int)))
355 break;
356 r = kvm_s390_inject_vm(kvm, &s390int);
357 break;
358 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200359 case KVM_ENABLE_CAP: {
360 struct kvm_enable_cap cap;
361 r = -EFAULT;
362 if (copy_from_user(&cap, argp, sizeof(cap)))
363 break;
364 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
365 break;
366 }
Cornelia Huck84223592013-07-15 13:36:01 +0200367 case KVM_CREATE_IRQCHIP: {
368 struct kvm_irq_routing_entry routing;
369
370 r = -EINVAL;
371 if (kvm->arch.use_irqchip) {
372 /* Set up dummy routing. */
373 memset(&routing, 0, sizeof(routing));
374 kvm_set_irq_routing(kvm, &routing, 0, 0);
375 r = 0;
376 }
377 break;
378 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200379 case KVM_SET_DEVICE_ATTR: {
380 r = -EFAULT;
381 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
382 break;
383 r = kvm_s390_vm_set_attr(kvm, &attr);
384 break;
385 }
386 case KVM_GET_DEVICE_ATTR: {
387 r = -EFAULT;
388 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
389 break;
390 r = kvm_s390_vm_get_attr(kvm, &attr);
391 break;
392 }
393 case KVM_HAS_DEVICE_ATTR: {
394 r = -EFAULT;
395 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
396 break;
397 r = kvm_s390_vm_has_attr(kvm, &attr);
398 break;
399 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100400 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300401 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100402 }
403
404 return r;
405}
406
Carsten Ottee08b9632012-01-04 10:25:20 +0100407int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100408{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100409 int rc;
410 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100411 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100412
Carsten Ottee08b9632012-01-04 10:25:20 +0100413 rc = -EINVAL;
414#ifdef CONFIG_KVM_S390_UCONTROL
415 if (type & ~KVM_VM_S390_UCONTROL)
416 goto out_err;
417 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
418 goto out_err;
419#else
420 if (type)
421 goto out_err;
422#endif
423
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100424 rc = s390_enable_sie();
425 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100426 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100427
Carsten Otteb2904112011-10-18 12:27:13 +0200428 rc = -ENOMEM;
429
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100430 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
431 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100432 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100433 spin_lock(&kvm_lock);
434 sca_offset = (sca_offset + 16) & 0x7f0;
435 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
436 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100437
438 sprintf(debug_name, "kvm-%u", current->pid);
439
440 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
441 if (!kvm->arch.dbf)
442 goto out_nodbf;
443
Carsten Otteba5c1e92008-03-25 18:47:26 +0100444 spin_lock_init(&kvm->arch.float_int.lock);
445 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100446 init_waitqueue_head(&kvm->arch.ipte_wq);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100447
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100448 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
449 VM_EVENT(kvm, 3, "%s", "vm created");
450
Carsten Ottee08b9632012-01-04 10:25:20 +0100451 if (type & KVM_VM_S390_UCONTROL) {
452 kvm->arch.gmap = NULL;
453 } else {
454 kvm->arch.gmap = gmap_alloc(current->mm);
455 if (!kvm->arch.gmap)
456 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200457 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200458 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100459 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100460
461 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200462 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100463
David Hildenbrand8ad35752014-03-14 11:00:21 +0100464 spin_lock_init(&kvm->arch.start_stop_lock);
465
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100466 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200467out_nogmap:
468 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100469out_nodbf:
470 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100471out_err:
472 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100473}
474
Christian Borntraegerd329c032008-11-26 14:50:27 +0100475void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
476{
477 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200478 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100479 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200480 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100481 if (!kvm_is_ucontrol(vcpu->kvm)) {
482 clear_bit(63 - vcpu->vcpu_id,
483 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
484 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
485 (__u64) vcpu->arch.sie_block)
486 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
487 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200488 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100489
490 if (kvm_is_ucontrol(vcpu->kvm))
491 gmap_free(vcpu->arch.gmap);
492
Dominik Dingelb31605c2014-03-25 13:47:11 +0100493 if (kvm_s390_cmma_enabled(vcpu->kvm))
494 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100495 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200496
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100497 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200498 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100499}
500
501static void kvm_free_vcpus(struct kvm *kvm)
502{
503 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300504 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100505
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300506 kvm_for_each_vcpu(i, vcpu, kvm)
507 kvm_arch_vcpu_destroy(vcpu);
508
509 mutex_lock(&kvm->lock);
510 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
511 kvm->vcpus[i] = NULL;
512
513 atomic_set(&kvm->online_vcpus, 0);
514 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100515}
516
Sheng Yangad8ba2c2009-01-06 10:03:02 +0800517void kvm_arch_sync_events(struct kvm *kvm)
518{
519}
520
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100521void kvm_arch_destroy_vm(struct kvm *kvm)
522{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100523 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100524 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100525 debug_unregister(kvm->arch.dbf);
Carsten Otte27e03932012-01-04 10:25:21 +0100526 if (!kvm_is_ucontrol(kvm))
527 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200528 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100529 kvm_s390_clear_float_irqs(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100530}
531
532/* Section: vcpu related */
533int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
534{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200535 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
536 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100537 if (kvm_is_ucontrol(vcpu->kvm)) {
538 vcpu->arch.gmap = gmap_alloc(current->mm);
539 if (!vcpu->arch.gmap)
540 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200541 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100542 return 0;
543 }
544
Carsten Otte598841c2011-07-24 10:48:21 +0200545 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100546 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
547 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100548 KVM_SYNC_ACRS |
549 KVM_SYNC_CRS;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100550 return 0;
551}
552
553void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
554{
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100555 /* Nothing todo */
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100556}
557
558void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
559{
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200560 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
561 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100562 save_access_regs(vcpu->arch.host_acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200563 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
564 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100565 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200566 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100567 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100568}
569
570void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
571{
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100572 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200573 gmap_disable(vcpu->arch.gmap);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200574 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
575 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100576 save_access_regs(vcpu->run->s.regs.acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200577 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
578 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100579 restore_access_regs(vcpu->arch.host_acrs);
580}
581
582static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
583{
584 /* this equals initial cpu reset in pop, but we don't switch to ESA */
585 vcpu->arch.sie_block->gpsw.mask = 0UL;
586 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +0100587 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100588 vcpu->arch.sie_block->cputm = 0UL;
589 vcpu->arch.sie_block->ckc = 0UL;
590 vcpu->arch.sie_block->todpr = 0;
591 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
592 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
593 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
594 vcpu->arch.guest_fpregs.fpc = 0;
595 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
596 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100597 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +0200598 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
599 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200600 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
601 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +0100602 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100603}
604
Marcelo Tosatti42897d82012-11-27 23:29:02 -0200605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606{
607 return 0;
608}
609
Dominik Dingelb31605c2014-03-25 13:47:11 +0100610void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
611{
612 free_page(vcpu->arch.sie_block->cbrlo);
613 vcpu->arch.sie_block->cbrlo = 0;
614}
615
616int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
617{
618 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
619 if (!vcpu->arch.sie_block->cbrlo)
620 return -ENOMEM;
621
622 vcpu->arch.sie_block->ecb2 |= 0x80;
623 vcpu->arch.sie_block->ecb2 &= ~0x08;
624 return 0;
625}
626
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100627int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
628{
Dominik Dingelb31605c2014-03-25 13:47:11 +0100629 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200630
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100631 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
632 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200633 CPUSTAT_STOPPED |
634 CPUSTAT_GED);
Christian Borntraegerfc345312010-06-17 23:16:20 +0200635 vcpu->arch.sie_block->ecb = 6;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200636 if (test_vfacility(50) && test_vfacility(73))
637 vcpu->arch.sie_block->ecb |= 0x10;
638
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200639 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrand49539192014-02-21 08:59:59 +0100640 vcpu->arch.sie_block->eca = 0xD1002000U;
Heiko Carstens217a4402013-12-30 12:54:14 +0100641 if (sclp_has_siif())
642 vcpu->arch.sie_block->eca |= 1;
Michael Mueller78c4b592013-07-26 15:04:04 +0200643 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
Matthew Rosato5a5e6532013-01-29 11:48:20 -0500644 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
645 ICTL_TPROT;
646
Dominik Dingelb31605c2014-03-25 13:47:11 +0100647 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
648 rc = kvm_s390_vcpu_setup_cmma(vcpu);
649 if (rc)
650 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200651 }
Christian Borntraegerca872302009-05-12 17:21:49 +0200652 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
Christian Borntraegerca872302009-05-12 17:21:49 +0200653 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Christian Borntraeger453423d2008-03-25 18:47:29 +0100654 get_cpu_id(&vcpu->arch.cpu_id);
Christian Borntraeger92e6ecf2009-03-26 15:23:58 +0100655 vcpu->arch.cpu_id.version = 0xff;
Dominik Dingelb31605c2014-03-25 13:47:11 +0100656 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100657}
658
659struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
660 unsigned int id)
661{
Carsten Otte4d475552011-10-18 12:27:12 +0200662 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200663 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200664 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100665
Carsten Otte4d475552011-10-18 12:27:12 +0200666 if (id >= KVM_MAX_VCPUS)
667 goto out;
668
669 rc = -ENOMEM;
670
Michael Muellerb110fea2013-06-12 13:54:54 +0200671 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100672 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200673 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100674
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200675 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
676 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100677 goto out_free_cpu;
678
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200679 vcpu->arch.sie_block = &sie_page->sie_block;
680 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
681
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100682 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100683 if (!kvm_is_ucontrol(kvm)) {
684 if (!kvm->arch.sca) {
685 WARN_ON_ONCE(1);
686 goto out_free_cpu;
687 }
688 if (!kvm->arch.sca->cpu[id].sda)
689 kvm->arch.sca->cpu[id].sda =
690 (__u64) vcpu->arch.sie_block;
691 vcpu->arch.sie_block->scaoh =
692 (__u32)(((__u64)kvm->arch.sca) >> 32);
693 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
694 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
695 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100696
Carsten Otteba5c1e92008-03-25 18:47:26 +0100697 spin_lock_init(&vcpu->arch.local_int.lock);
698 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
699 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200700 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100701 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100702
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100703 rc = kvm_vcpu_init(vcpu, kvm, id);
704 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800705 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100706 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
707 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200708 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100709
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100710 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800711out_free_sie_block:
712 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100713out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200714 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200715out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100716 return ERR_PTR(rc);
717}
718
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	int has_pending_irq = kvm_cpu_has_interrupt(vcpu);

	return has_pending_irq;
}
723
/*
 * Prevent the vcpu from (re-)entering SIE by setting the PROG_BLOCK_SIE
 * bit in its SIE control block. Undone by s390_vcpu_unblock().
 */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
728
/* Allow the vcpu to enter SIE again by clearing the PROG_BLOCK_SIE bit. */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
733
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the vcpu has actually left guest context */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
744
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* order matters: block first so the vcpu cannot immediately re-enter */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
751
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200752static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
753{
754 int i;
755 struct kvm *kvm = gmap->private;
756 struct kvm_vcpu *vcpu;
757
758 kvm_for_each_vcpu(i, vcpu, kvm) {
759 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +0200760 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200761 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
762 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
763 exit_sie_sync(vcpu);
764 }
765 }
766}
767
/*
 * kvm common code refers to this, but never calls it on s390;
 * BUG() guards against that assumption ever breaking.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
774
Carsten Otte14eebd92012-05-15 14:15:26 +0200775static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
776 struct kvm_one_reg *reg)
777{
778 int r = -EINVAL;
779
780 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +0200781 case KVM_REG_S390_TODPR:
782 r = put_user(vcpu->arch.sie_block->todpr,
783 (u32 __user *)reg->addr);
784 break;
785 case KVM_REG_S390_EPOCHDIFF:
786 r = put_user(vcpu->arch.sie_block->epoch,
787 (u64 __user *)reg->addr);
788 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +0200789 case KVM_REG_S390_CPU_TIMER:
790 r = put_user(vcpu->arch.sie_block->cputm,
791 (u64 __user *)reg->addr);
792 break;
793 case KVM_REG_S390_CLOCK_COMP:
794 r = put_user(vcpu->arch.sie_block->ckc,
795 (u64 __user *)reg->addr);
796 break;
Dominik Dingel536336c2013-09-30 10:55:33 +0200797 case KVM_REG_S390_PFTOKEN:
798 r = put_user(vcpu->arch.pfault_token,
799 (u64 __user *)reg->addr);
800 break;
801 case KVM_REG_S390_PFCOMPARE:
802 r = put_user(vcpu->arch.pfault_compare,
803 (u64 __user *)reg->addr);
804 break;
805 case KVM_REG_S390_PFSELECT:
806 r = put_user(vcpu->arch.pfault_select,
807 (u64 __user *)reg->addr);
808 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100809 case KVM_REG_S390_PP:
810 r = put_user(vcpu->arch.sie_block->pp,
811 (u64 __user *)reg->addr);
812 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +0100813 case KVM_REG_S390_GBEA:
814 r = put_user(vcpu->arch.sie_block->gbea,
815 (u64 __user *)reg->addr);
816 break;
Carsten Otte14eebd92012-05-15 14:15:26 +0200817 default:
818 break;
819 }
820
821 return r;
822}
823
824static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
825 struct kvm_one_reg *reg)
826{
827 int r = -EINVAL;
828
829 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +0200830 case KVM_REG_S390_TODPR:
831 r = get_user(vcpu->arch.sie_block->todpr,
832 (u32 __user *)reg->addr);
833 break;
834 case KVM_REG_S390_EPOCHDIFF:
835 r = get_user(vcpu->arch.sie_block->epoch,
836 (u64 __user *)reg->addr);
837 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +0200838 case KVM_REG_S390_CPU_TIMER:
839 r = get_user(vcpu->arch.sie_block->cputm,
840 (u64 __user *)reg->addr);
841 break;
842 case KVM_REG_S390_CLOCK_COMP:
843 r = get_user(vcpu->arch.sie_block->ckc,
844 (u64 __user *)reg->addr);
845 break;
Dominik Dingel536336c2013-09-30 10:55:33 +0200846 case KVM_REG_S390_PFTOKEN:
847 r = get_user(vcpu->arch.pfault_token,
848 (u64 __user *)reg->addr);
849 break;
850 case KVM_REG_S390_PFCOMPARE:
851 r = get_user(vcpu->arch.pfault_compare,
852 (u64 __user *)reg->addr);
853 break;
854 case KVM_REG_S390_PFSELECT:
855 r = get_user(vcpu->arch.pfault_select,
856 (u64 __user *)reg->addr);
857 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100858 case KVM_REG_S390_PP:
859 r = get_user(vcpu->arch.sie_block->pp,
860 (u64 __user *)reg->addr);
861 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +0100862 case KVM_REG_S390_GBEA:
863 r = get_user(vcpu->arch.sie_block->gbea,
864 (u64 __user *)reg->addr);
865 break;
Carsten Otte14eebd92012-05-15 14:15:26 +0200866 default:
867 break;
868 }
869
870 return r;
871}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500872
/* KVM_S390_INITIAL_RESET ioctl: reset the vcpu; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
878
/* Copy all general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
884
/* Copy all general purpose registers of the vcpu out to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
890
/*
 * Set the access and control registers. The access registers are
 * additionally loaded into the host right away - the guest acrs live
 * in the host registers while the vcpu is loaded (lazy switching).
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
899
/* Copy the access and control registers of the vcpu out to userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
907
/*
 * Set the guest floating point registers and fp control register.
 * Returns -EINVAL for an invalid fpc value, 0 otherwise.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* validate the fp control value before touching any state */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* load the new values into the host registers right away */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
918
/* Copy the guest floating point registers and fpc out to userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
925
926static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
927{
928 int rc = 0;
929
David Hildenbrand7a42fdc2014-05-05 16:26:19 +0200930 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100931 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100932 else {
933 vcpu->run->psw_mask = psw.mask;
934 vcpu->run->psw_addr = psw.addr;
935 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100936 return rc;
937}
938
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
944
/* debug control flags accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG ioctl: enable/disable guest debugging and import
 * hardware breakpoints. On any failure all debug state is reverted.
 * Returns 0 on success, -EINVAL for invalid flags, or the error from
 * kvm_s390_import_bp_data().
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate before applying the new settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* unwind everything if importing breakpoint data failed */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
980
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -0300981int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
982 struct kvm_mp_state *mp_state)
983{
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200984 /* CHECK_STOP and LOAD are not supported yet */
985 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
986 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -0300987}
988
989int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
990 struct kvm_mp_state *mp_state)
991{
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200992 int rc = 0;
993
994 /* user space knows about this interface - let it control the state */
995 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
996
997 switch (mp_state->mp_state) {
998 case KVM_MP_STATE_STOPPED:
999 kvm_s390_vcpu_stop(vcpu);
1000 break;
1001 case KVM_MP_STATE_OPERATING:
1002 kvm_s390_vcpu_start(vcpu);
1003 break;
1004 case KVM_MP_STATE_LOAD:
1005 case KVM_MP_STATE_CHECK_STOP:
1006 /* fall through - CHECK_STOP and LOAD are not supported yet */
1007 default:
1008 rc = -ENXIO;
1009 }
1010
1011 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001012}
1013
Dominik Dingelb31605c2014-03-25 13:47:11 +01001014bool kvm_s390_cmma_enabled(struct kvm *kvm)
1015{
1016 if (!MACHINE_IS_LPAR)
1017 return false;
1018 /* only enable for z10 and later */
1019 if (!MACHINE_HAS_EDAT1)
1020 return false;
1021 if (!kvm->arch.use_cmma)
1022 return false;
1023 return true;
1024}
1025
/* Is the interruption-blocking-state (IBS) flag set for this vcpu? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1030
/*
 * Process pending vcpu requests before (re-)entering SIE.
 * Returns 0 on success or a negative error code.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	/* drop PROG_BLOCK_SIE so the vcpu may enter SIE again */
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	/* turn on the IBS facility if requested and not yet enabled */
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* turn off the IBS facility if requested and currently enabled */
	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1075
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	/* translate the guest address into a host virtual address */
	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	/* fault in exactly one page; get_user_pages needs mmap_sem held */
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}
1101
Dominik Dingel3c038e62013-10-07 17:11:48 +02001102static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1103 unsigned long token)
1104{
1105 struct kvm_s390_interrupt inti;
1106 inti.parm64 = token;
1107
1108 if (start_token) {
1109 inti.type = KVM_S390_INT_PFAULT_INIT;
1110 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
1111 } else {
1112 inti.type = KVM_S390_INT_PFAULT_DONE;
1113 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1114 }
1115}
1116
/* Notify the guest that a page is not yet available (pfault init). */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1123
/* Notify the guest that a page is available again (pfault done). */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1130
/* Intentionally empty: see the comment below. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1136
/* Always true on s390; see the comment inside for why. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1145
/*
 * Try to arm an async page fault for the currently faulting guest
 * address. Returns 0 when no async pfault was set up (any precondition
 * failed), otherwise the result of kvm_setup_async_pf().
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the PSW bits selected by the guest must match its compare value */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* page-aligned translation plus the offset within the page */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1174
/*
 * Prepare the vcpu for entering SIE: handle pending host work, deliver
 * interrupts and process requests. Returns 0 when the guest may be
 * entered, otherwise an error code for the run loop.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* copy gprs 14 and 15 (16 bytes) into the SIE control block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1213
/*
 * Handle the result of a SIE exit. @exit_reason is the return value of
 * sie64a(). Returns 0 to continue the run loop or a non-zero code to
 * leave it (e.g. -EOPNOTSUPP/-EREMOTE for userspace-handled exits).
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;	/* -1: turned into PGM_ADDRESSING injection below */

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* user controlled VMs get translation faults via userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* a guest page is missing on the host side */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* copy gprs 14 and 15 back from the SIE control block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1263
/*
 * The main run loop: alternate between preparing the vcpu, running the
 * guest via SIE and handling the exit, until a signal, a pending debug
 * exit or an error/userspace-exit code terminates the loop.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1298
/*
 * The KVM_RUN ioctl: synchronize register state between kvm_run and the
 * SIE control block, run the guest and prepare the exit information for
 * userspace. Returns 0 or a negative error code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* auto-start the vcpu unless userspace controls the cpu state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	/* only a known set of exit reasons may re-enter the run loop */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
	case KVM_EXIT_DEBUG:
		break;
	default:
		BUG();
	}

	/* pick up register state userspace may have modified */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* write register state back for userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1384
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag architected mode in byte 163, then use 0x1200 */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the clock comparator is saved shifted right by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1432
/*
 * Store the vcpu's status at @addr, refreshing the saved FPRS/ACRS
 * copies from the host registers first.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1446
/*
 * Request IBS to be disabled on this vcpu and kick it out of SIE so
 * the request is handled before the next guest entry.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* consume a not yet processed ENABLE request, then request DISABLE */
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1453
1454static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1455{
1456 unsigned int i;
1457 struct kvm_vcpu *vcpu;
1458
1459 kvm_for_each_vcpu(i, vcpu, kvm) {
1460 __disable_ibs_on_vcpu(vcpu);
1461 }
1462}
1463
/*
 * Request IBS to be enabled on this vcpu and kick it out of SIE so
 * the request is handled before the next guest entry.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* consume a not yet processed DISABLE request, then request ENABLE */
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1470
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001471void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1472{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001473 int i, online_vcpus, started_vcpus = 0;
1474
1475 if (!is_vcpu_stopped(vcpu))
1476 return;
1477
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001478 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001479 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001480 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001481 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1482
1483 for (i = 0; i < online_vcpus; i++) {
1484 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1485 started_vcpus++;
1486 }
1487
1488 if (started_vcpus == 0) {
1489 /* we're the only active VCPU -> speed it up */
1490 __enable_ibs_on_vcpu(vcpu);
1491 } else if (started_vcpus == 1) {
1492 /*
1493 * As we are starting a second VCPU, we have to disable
1494 * the IBS facility on all VCPUs to remove potentially
1495 * oustanding ENABLE requests.
1496 */
1497 __disable_ibs_on_all_vcpus(vcpu->kvm);
1498 }
1499
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001500 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001501 /*
1502 * Another VCPU might have used IBS while we were offline.
1503 * Let's play safe and flush the VCPU at startup.
1504 */
1505 vcpu->arch.sie_block->ihcpu = 0xffff;
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001506 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001507 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001508}
1509
1510void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1511{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001512 int i, online_vcpus, started_vcpus = 0;
1513 struct kvm_vcpu *started_vcpu = NULL;
1514
1515 if (is_vcpu_stopped(vcpu))
1516 return;
1517
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001518 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001519 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001520 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001521 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1522
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001523 /* Need to lock access to action_bits to avoid a SIGP race condition */
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001524 spin_lock(&vcpu->arch.local_int.lock);
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001525 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001526
1527 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1528 vcpu->arch.local_int.action_bits &=
1529 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001530 spin_unlock(&vcpu->arch.local_int.lock);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001531
David Hildenbrand8ad35752014-03-14 11:00:21 +01001532 __disable_ibs_on_vcpu(vcpu);
1533
1534 for (i = 0; i < online_vcpus; i++) {
1535 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1536 started_vcpus++;
1537 started_vcpu = vcpu->kvm->vcpus[i];
1538 }
1539 }
1540
1541 if (started_vcpus == 1) {
1542 /*
1543 * As we only have one VCPU left, we want to enable the
1544 * IBS facility for that VCPU to speed it up.
1545 */
1546 __enable_ibs_on_vcpu(started_vcpu);
1547 }
1548
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001549 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001550 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001551}
1552
Cornelia Huckd6712df2012-12-20 15:32:11 +01001553static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1554 struct kvm_enable_cap *cap)
1555{
1556 int r;
1557
1558 if (cap->flags)
1559 return -EINVAL;
1560
1561 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001562 case KVM_CAP_S390_CSS_SUPPORT:
1563 if (!vcpu->kvm->arch.css_support) {
1564 vcpu->kvm->arch.css_support = 1;
1565 trace_kvm_s390_enable_css(vcpu->kvm);
1566 }
1567 r = 0;
1568 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001569 default:
1570 r = -EINVAL;
1571 break;
1572 }
1573 return r;
1574}
1575
/*
 * Dispatcher for the s390-specific vcpu ioctls.  Unknown commands
 * yield -ENOTTY; commands that copy a struct from userspace yield
 * -EFAULT when the copy fails.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* hold the srcu read lock while the store walks guest memory */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	/* the UCAS ioctls are only valid for user-controlled guests */
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* any non-error gmap_fault() result is reported as success */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1679
Carsten Otte5b1c1492012-01-04 10:25:23 +01001680int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1681{
1682#ifdef CONFIG_KVM_S390_UCONTROL
1683 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1684 && (kvm_is_ucontrol(vcpu->kvm))) {
1685 vmf->page = virt_to_page(vcpu->arch.sie_block);
1686 get_page(vmf->page);
1687 return 0;
1688 }
1689#endif
1690 return VM_FAULT_SIGBUS;
1691}
1692
/* s390 keeps no arch-specific per-memslot data — nothing to free. */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
1697
/* s390 needs no arch-specific per-memslot allocation; always succeeds. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1703
/* No arch work is needed when the memslot array changes on s390. */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
1707
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001708/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001709int kvm_arch_prepare_memory_region(struct kvm *kvm,
1710 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001711 struct kvm_userspace_memory_region *mem,
1712 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001713{
Nick Wangdd2887e2013-03-25 17:22:57 +01001714 /* A few sanity checks. We can have memory slots which have to be
1715 located/ended at a segment boundary (1MB). The memory in userland is
1716 ok to be fragmented into various different vmas. It is okay to mmap()
1717 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001718
Carsten Otte598841c2011-07-24 10:48:21 +02001719 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001720 return -EINVAL;
1721
Carsten Otte598841c2011-07-24 10:48:21 +02001722 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001723 return -EINVAL;
1724
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001725 return 0;
1726}
1727
1728void kvm_arch_commit_memory_region(struct kvm *kvm,
1729 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001730 const struct kvm_memory_slot *old,
1731 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001732{
Carsten Ottef7850c92011-07-24 10:48:23 +02001733 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001734
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01001735 /* If the basics of the memslot do not change, we do not want
1736 * to update the gmap. Every update causes several unnecessary
1737 * segment translation exceptions. This is usually handled just
1738 * fine by the normal fault handler + gmap, but it will also
1739 * cause faults on the prefix page of running guest CPUs.
1740 */
1741 if (old->userspace_addr == mem->userspace_addr &&
1742 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1743 old->npages * PAGE_SIZE == mem->memory_size)
1744 return;
Carsten Otte598841c2011-07-24 10:48:21 +02001745
1746 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1747 mem->guest_phys_addr, mem->memory_size);
1748 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02001749 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02001750 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001751}
1752
/* s390 has no shadow page tables to flush here. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
1756
/* s390 has no per-memslot shadow state to flush here. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1761
/*
 * Module init: register with the generic kvm layer, then allocate and
 * populate the facility list advertised to guests.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 * GFP_DMA: the page must be addressable below 2GB — TODO confirm
	 * against the STFLE store constraints.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		/* undo the kvm_init() registration before failing */
		kvm_exit();
		return -ENOMEM;
	}
	/* start from the host facility list (first 16 bytes = 128 bits) */
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	/* mask to the facility bits KVM knows how to virtualize */
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
1784
/* Module exit: release the facility page, then unregister from kvm. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1790
/* module entry/exit hooks */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");