blob: 9cb6cfaac986fa8d642c75d5f3fb89b325a98b85 [file] [log] [blame]
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010029#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010030#include <asm/lowcore.h>
31#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010032#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010033#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020034#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020035#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010036#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010037#include "gaccess.h"
38
David Hildenbrandea2cdd22015-05-20 13:24:02 +020039#define KMSG_COMPONENT "kvm-s390"
40#undef pr_fmt
41#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
42
Cornelia Huck5786fff2012-07-23 17:20:29 +020043#define CREATE_TRACE_POINTS
44#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020045#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020046
Thomas Huth41408c282015-02-06 15:01:21 +010047#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010048#define LOCAL_IRQS 32
49#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
50 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010051
Heiko Carstensb0c632d2008-03-25 18:47:20 +010052#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
53
54struct kvm_stats_debugfs_item debugfs_entries[] = {
55 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020056 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010057 { "exit_validity", VCPU_STAT(exit_validity) },
58 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
59 { "exit_external_request", VCPU_STAT(exit_external_request) },
60 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010061 { "exit_instruction", VCPU_STAT(exit_instruction) },
62 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
63 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Paolo Bonzinif7819512015-02-04 18:20:58 +010064 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020065 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020066 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010067 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010068 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
69 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010070 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020071 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010072 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
73 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
74 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
75 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
76 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
77 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
78 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020079 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010080 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
81 { "instruction_spx", VCPU_STAT(instruction_spx) },
82 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
83 { "instruction_stap", VCPU_STAT(instruction_stap) },
84 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010085 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010086 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
87 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020088 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010089 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
90 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020091 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010092 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010093 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020094 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010095 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020096 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
97 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010098 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020099 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
100 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Eric Farmancd7b4b62015-02-12 09:06:34 -0500101 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100102 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
103 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
104 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200105 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
106 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
107 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger388186b2011-10-30 15:17:03 +0100108 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +0100109 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +0200110 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100111 { NULL }
112};
113
/* upper facilities limit for kvm (bit masks, one u64 per facility word) */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100119
Michael Mueller9d8d5782015-02-02 15:42:51 +0100120unsigned long kvm_s390_fac_list_mask_size(void)
Michael Mueller78c4b592013-07-26 15:04:04 +0200121{
Michael Mueller9d8d5782015-02-02 15:42:51 +0100122 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
123 return ARRAY_SIZE(kvm_s390_fac_list_mask);
Michael Mueller78c4b592013-07-26 15:04:04 +0200124}
125
Michael Mueller9d8d5782015-02-02 15:42:51 +0100126static struct gmap_notifier gmap_notifier;
127
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
134
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200135static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
136
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100137int kvm_arch_hardware_setup(void)
138{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200139 gmap_notifier.notifier_call = kvm_gmap_notifier;
140 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100141 return 0;
142}
143
144void kvm_arch_hardware_unsetup(void)
145{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200146 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100147}
148
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100149int kvm_arch_init(void *opaque)
150{
Cornelia Huck84877d92014-09-02 10:27:35 +0100151 /* Register floating interrupt controller interface. */
152 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100153}
154
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100155/* Section: device related */
156long kvm_arch_dev_ioctl(struct file *filp,
157 unsigned int ioctl, unsigned long arg)
158{
159 if (ioctl == KVM_S390_ENABLE_SIE)
160 return s390_enable_sie();
161 return -EINVAL;
162}
163
Alexander Graf784aa3d2014-07-14 18:27:35 +0200164int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100165{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100166 int r;
167
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200168 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100169 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200170 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100171 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100172#ifdef CONFIG_KVM_S390_UCONTROL
173 case KVM_CAP_S390_UCONTROL:
174#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200175 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100176 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200177 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100178 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100179 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100180 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200181 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200182 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200183 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200184 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200185 case KVM_CAP_MP_STATE:
Jens Freimann47b43c52014-11-11 20:57:06 +0100186 case KVM_CAP_S390_INJECT_IRQ:
David Hildenbrand2444b352014-10-09 14:10:13 +0200187 case KVM_CAP_S390_USER_SIGP:
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100188 case KVM_CAP_S390_USER_STSI:
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400189 case KVM_CAP_S390_SKEYS:
Jens Freimann816c7662014-11-24 17:13:46 +0100190 case KVM_CAP_S390_IRQ_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100191 r = 1;
192 break;
Thomas Huth41408c282015-02-06 15:01:21 +0100193 case KVM_CAP_S390_MEM_OP:
194 r = MEM_OP_MAX_SIZE;
195 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200196 case KVM_CAP_NR_VCPUS:
197 case KVM_CAP_MAX_VCPUS:
198 r = KVM_MAX_VCPUS;
199 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100200 case KVM_CAP_NR_MEMSLOTS:
201 r = KVM_USER_MEM_SLOTS;
202 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200203 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100204 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200205 break;
Eric Farman68c55752014-06-09 10:57:26 -0400206 case KVM_CAP_S390_VECTOR_REGISTERS:
207 r = MACHINE_HAS_VX;
208 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200209 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100210 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200211 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100212 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100213}
214
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400215static void kvm_s390_sync_dirty_log(struct kvm *kvm,
216 struct kvm_memory_slot *memslot)
217{
218 gfn_t cur_gfn, last_gfn;
219 unsigned long address;
220 struct gmap *gmap = kvm->arch.gmap;
221
222 down_read(&gmap->mm->mmap_sem);
223 /* Loop over all guest pages */
224 last_gfn = memslot->base_gfn + memslot->npages;
225 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
226 address = gfn_to_hva_memslot(memslot, cur_gfn);
227
228 if (gmap_test_and_clear_dirty(address, gmap))
229 mark_page_dirty(kvm, cur_gfn);
230 }
231 up_read(&gmap->mm->mmap_sem);
232}
233
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100234/* Section: vm related */
235/*
236 * Get (and clear) the dirty memory log for a memory slot.
237 */
238int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
239 struct kvm_dirty_log *log)
240{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400241 int r;
242 unsigned long n;
243 struct kvm_memory_slot *memslot;
244 int is_dirty = 0;
245
246 mutex_lock(&kvm->slots_lock);
247
248 r = -EINVAL;
249 if (log->slot >= KVM_USER_MEM_SLOTS)
250 goto out;
251
252 memslot = id_to_memslot(kvm->memslots, log->slot);
253 r = -ENOENT;
254 if (!memslot->dirty_bitmap)
255 goto out;
256
257 kvm_s390_sync_dirty_log(kvm, memslot);
258 r = kvm_get_dirty_log(kvm, log, &is_dirty);
259 if (r)
260 goto out;
261
262 /* Clear the dirty log */
263 if (is_dirty) {
264 n = kvm_dirty_bitmap_bytes(memslot);
265 memset(memslot->dirty_bitmap, 0, n);
266 }
267 r = 0;
268out:
269 mutex_unlock(&kvm->slots_lock);
270 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100271}
272
Cornelia Huckd938dc52013-10-23 18:26:34 +0200273static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
274{
275 int r;
276
277 if (cap->flags)
278 return -EINVAL;
279
280 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200281 case KVM_CAP_S390_IRQCHIP:
282 kvm->arch.use_irqchip = 1;
283 r = 0;
284 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200285 case KVM_CAP_S390_USER_SIGP:
286 kvm->arch.user_sigp = 1;
287 r = 0;
288 break;
Eric Farman68c55752014-06-09 10:57:26 -0400289 case KVM_CAP_S390_VECTOR_REGISTERS:
Michael Mueller18280d82015-03-16 16:05:41 +0100290 if (MACHINE_HAS_VX) {
291 set_kvm_facility(kvm->arch.model.fac->mask, 129);
292 set_kvm_facility(kvm->arch.model.fac->list, 129);
293 r = 0;
294 } else
295 r = -EINVAL;
Eric Farman68c55752014-06-09 10:57:26 -0400296 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100297 case KVM_CAP_S390_USER_STSI:
298 kvm->arch.user_stsi = 1;
299 r = 0;
300 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200301 default:
302 r = -EINVAL;
303 break;
304 }
305 return r;
306}
307
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100308static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
309{
310 int ret;
311
312 switch (attr->attr) {
313 case KVM_S390_VM_MEM_LIMIT_SIZE:
314 ret = 0;
315 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
316 ret = -EFAULT;
317 break;
318 default:
319 ret = -ENXIO;
320 break;
321 }
322 return ret;
323}
324
325static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200326{
327 int ret;
328 unsigned int idx;
329 switch (attr->attr) {
330 case KVM_S390_VM_MEM_ENABLE_CMMA:
331 ret = -EBUSY;
332 mutex_lock(&kvm->lock);
333 if (atomic_read(&kvm->online_vcpus) == 0) {
334 kvm->arch.use_cmma = 1;
335 ret = 0;
336 }
337 mutex_unlock(&kvm->lock);
338 break;
339 case KVM_S390_VM_MEM_CLR_CMMA:
340 mutex_lock(&kvm->lock);
341 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200342 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200343 srcu_read_unlock(&kvm->srcu, idx);
344 mutex_unlock(&kvm->lock);
345 ret = 0;
346 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100347 case KVM_S390_VM_MEM_LIMIT_SIZE: {
348 unsigned long new_limit;
349
350 if (kvm_is_ucontrol(kvm))
351 return -EINVAL;
352
353 if (get_user(new_limit, (u64 __user *)attr->addr))
354 return -EFAULT;
355
356 if (new_limit > kvm->arch.gmap->asce_end)
357 return -E2BIG;
358
359 ret = -EBUSY;
360 mutex_lock(&kvm->lock);
361 if (atomic_read(&kvm->online_vcpus) == 0) {
362 /* gmap_alloc will round the limit up */
363 struct gmap *new = gmap_alloc(current->mm, new_limit);
364
365 if (!new) {
366 ret = -ENOMEM;
367 } else {
368 gmap_free(kvm->arch.gmap);
369 new->private = kvm;
370 kvm->arch.gmap = new;
371 ret = 0;
372 }
373 }
374 mutex_unlock(&kvm->lock);
375 break;
376 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200377 default:
378 ret = -ENXIO;
379 break;
380 }
381 return ret;
382}
383
Tony Krowiaka374e892014-09-03 10:13:53 +0200384static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
385
386static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
387{
388 struct kvm_vcpu *vcpu;
389 int i;
390
Michael Mueller9d8d5782015-02-02 15:42:51 +0100391 if (!test_kvm_facility(kvm, 76))
Tony Krowiaka374e892014-09-03 10:13:53 +0200392 return -EINVAL;
393
394 mutex_lock(&kvm->lock);
395 switch (attr->attr) {
396 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
397 get_random_bytes(
398 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
399 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
400 kvm->arch.crypto.aes_kw = 1;
401 break;
402 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
403 get_random_bytes(
404 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
405 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
406 kvm->arch.crypto.dea_kw = 1;
407 break;
408 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
409 kvm->arch.crypto.aes_kw = 0;
410 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
411 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
412 break;
413 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
414 kvm->arch.crypto.dea_kw = 0;
415 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
416 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
417 break;
418 default:
419 mutex_unlock(&kvm->lock);
420 return -ENXIO;
421 }
422
423 kvm_for_each_vcpu(i, vcpu, kvm) {
424 kvm_s390_vcpu_crypto_setup(vcpu);
425 exit_sie(vcpu);
426 }
427 mutex_unlock(&kvm->lock);
428 return 0;
429}
430
Jason J. Herne72f25022014-11-25 09:46:02 -0500431static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
432{
433 u8 gtod_high;
434
435 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
436 sizeof(gtod_high)))
437 return -EFAULT;
438
439 if (gtod_high != 0)
440 return -EINVAL;
441
442 return 0;
443}
444
445static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
446{
447 struct kvm_vcpu *cur_vcpu;
448 unsigned int vcpu_idx;
449 u64 host_tod, gtod;
450 int r;
451
452 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
453 return -EFAULT;
454
455 r = store_tod_clock(&host_tod);
456 if (r)
457 return r;
458
459 mutex_lock(&kvm->lock);
460 kvm->arch.epoch = gtod - host_tod;
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200461 kvm_s390_vcpu_block_all(kvm);
462 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
Jason J. Herne72f25022014-11-25 09:46:02 -0500463 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200464 kvm_s390_vcpu_unblock_all(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -0500465 mutex_unlock(&kvm->lock);
466 return 0;
467}
468
469static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
470{
471 int ret;
472
473 if (attr->flags)
474 return -EINVAL;
475
476 switch (attr->attr) {
477 case KVM_S390_VM_TOD_HIGH:
478 ret = kvm_s390_set_tod_high(kvm, attr);
479 break;
480 case KVM_S390_VM_TOD_LOW:
481 ret = kvm_s390_set_tod_low(kvm, attr);
482 break;
483 default:
484 ret = -ENXIO;
485 break;
486 }
487 return ret;
488}
489
490static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
491{
492 u8 gtod_high = 0;
493
494 if (copy_to_user((void __user *)attr->addr, &gtod_high,
495 sizeof(gtod_high)))
496 return -EFAULT;
497
498 return 0;
499}
500
501static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
502{
503 u64 host_tod, gtod;
504 int r;
505
506 r = store_tod_clock(&host_tod);
507 if (r)
508 return r;
509
510 gtod = host_tod + kvm->arch.epoch;
511 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
512 return -EFAULT;
513
514 return 0;
515}
516
517static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
518{
519 int ret;
520
521 if (attr->flags)
522 return -EINVAL;
523
524 switch (attr->attr) {
525 case KVM_S390_VM_TOD_HIGH:
526 ret = kvm_s390_get_tod_high(kvm, attr);
527 break;
528 case KVM_S390_VM_TOD_LOW:
529 ret = kvm_s390_get_tod_low(kvm, attr);
530 break;
531 default:
532 ret = -ENXIO;
533 break;
534 }
535 return ret;
536}
537
Michael Mueller658b6ed2015-02-02 15:49:35 +0100538static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
539{
540 struct kvm_s390_vm_cpu_processor *proc;
541 int ret = 0;
542
543 mutex_lock(&kvm->lock);
544 if (atomic_read(&kvm->online_vcpus)) {
545 ret = -EBUSY;
546 goto out;
547 }
548 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
549 if (!proc) {
550 ret = -ENOMEM;
551 goto out;
552 }
553 if (!copy_from_user(proc, (void __user *)attr->addr,
554 sizeof(*proc))) {
555 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
556 sizeof(struct cpuid));
557 kvm->arch.model.ibc = proc->ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100558 memcpy(kvm->arch.model.fac->list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +0100559 S390_ARCH_FAC_LIST_SIZE_BYTE);
560 } else
561 ret = -EFAULT;
562 kfree(proc);
563out:
564 mutex_unlock(&kvm->lock);
565 return ret;
566}
567
568static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
569{
570 int ret = -ENXIO;
571
572 switch (attr->attr) {
573 case KVM_S390_VM_CPU_PROCESSOR:
574 ret = kvm_s390_set_processor(kvm, attr);
575 break;
576 }
577 return ret;
578}
579
580static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
581{
582 struct kvm_s390_vm_cpu_processor *proc;
583 int ret = 0;
584
585 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
586 if (!proc) {
587 ret = -ENOMEM;
588 goto out;
589 }
590 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
591 proc->ibc = kvm->arch.model.ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100592 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100593 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
594 ret = -EFAULT;
595 kfree(proc);
596out:
597 return ret;
598}
599
600static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
601{
602 struct kvm_s390_vm_cpu_machine *mach;
603 int ret = 0;
604
605 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
606 if (!mach) {
607 ret = -ENOMEM;
608 goto out;
609 }
610 get_cpu_id((struct cpuid *) &mach->cpuid);
611 mach->ibc = sclp_get_ibc();
Michael Mueller981467c2015-02-24 13:51:04 +0100612 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
613 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100614 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +0100615 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100616 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
617 ret = -EFAULT;
618 kfree(mach);
619out:
620 return ret;
621}
622
623static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
624{
625 int ret = -ENXIO;
626
627 switch (attr->attr) {
628 case KVM_S390_VM_CPU_PROCESSOR:
629 ret = kvm_s390_get_processor(kvm, attr);
630 break;
631 case KVM_S390_VM_CPU_MACHINE:
632 ret = kvm_s390_get_machine(kvm, attr);
633 break;
634 }
635 return ret;
636}
637
Dominik Dingelf2061652014-04-09 13:13:00 +0200638static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
639{
640 int ret;
641
642 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200643 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100644 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200645 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500646 case KVM_S390_VM_TOD:
647 ret = kvm_s390_set_tod(kvm, attr);
648 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100649 case KVM_S390_VM_CPU_MODEL:
650 ret = kvm_s390_set_cpu_model(kvm, attr);
651 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200652 case KVM_S390_VM_CRYPTO:
653 ret = kvm_s390_vm_set_crypto(kvm, attr);
654 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200655 default:
656 ret = -ENXIO;
657 break;
658 }
659
660 return ret;
661}
662
663static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
664{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100665 int ret;
666
667 switch (attr->group) {
668 case KVM_S390_VM_MEM_CTRL:
669 ret = kvm_s390_get_mem_control(kvm, attr);
670 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500671 case KVM_S390_VM_TOD:
672 ret = kvm_s390_get_tod(kvm, attr);
673 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100674 case KVM_S390_VM_CPU_MODEL:
675 ret = kvm_s390_get_cpu_model(kvm, attr);
676 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100677 default:
678 ret = -ENXIO;
679 break;
680 }
681
682 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200683}
684
685static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
686{
687 int ret;
688
689 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200690 case KVM_S390_VM_MEM_CTRL:
691 switch (attr->attr) {
692 case KVM_S390_VM_MEM_ENABLE_CMMA:
693 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100694 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200695 ret = 0;
696 break;
697 default:
698 ret = -ENXIO;
699 break;
700 }
701 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500702 case KVM_S390_VM_TOD:
703 switch (attr->attr) {
704 case KVM_S390_VM_TOD_LOW:
705 case KVM_S390_VM_TOD_HIGH:
706 ret = 0;
707 break;
708 default:
709 ret = -ENXIO;
710 break;
711 }
712 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100713 case KVM_S390_VM_CPU_MODEL:
714 switch (attr->attr) {
715 case KVM_S390_VM_CPU_PROCESSOR:
716 case KVM_S390_VM_CPU_MACHINE:
717 ret = 0;
718 break;
719 default:
720 ret = -ENXIO;
721 break;
722 }
723 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200724 case KVM_S390_VM_CRYPTO:
725 switch (attr->attr) {
726 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
727 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
728 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
729 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
730 ret = 0;
731 break;
732 default:
733 ret = -ENXIO;
734 break;
735 }
736 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200737 default:
738 ret = -ENXIO;
739 break;
740 }
741
742 return ret;
743}
744
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400745static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
746{
747 uint8_t *keys;
748 uint64_t hva;
749 unsigned long curkey;
750 int i, r = 0;
751
752 if (args->flags != 0)
753 return -EINVAL;
754
755 /* Is this guest using storage keys? */
756 if (!mm_use_skey(current->mm))
757 return KVM_S390_GET_SKEYS_NONE;
758
759 /* Enforce sane limit on memory allocation */
760 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
761 return -EINVAL;
762
763 keys = kmalloc_array(args->count, sizeof(uint8_t),
764 GFP_KERNEL | __GFP_NOWARN);
765 if (!keys)
766 keys = vmalloc(sizeof(uint8_t) * args->count);
767 if (!keys)
768 return -ENOMEM;
769
770 for (i = 0; i < args->count; i++) {
771 hva = gfn_to_hva(kvm, args->start_gfn + i);
772 if (kvm_is_error_hva(hva)) {
773 r = -EFAULT;
774 goto out;
775 }
776
777 curkey = get_guest_storage_key(current->mm, hva);
778 if (IS_ERR_VALUE(curkey)) {
779 r = curkey;
780 goto out;
781 }
782 keys[i] = curkey;
783 }
784
785 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
786 sizeof(uint8_t) * args->count);
787 if (r)
788 r = -EFAULT;
789out:
790 kvfree(keys);
791 return r;
792}
793
794static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
795{
796 uint8_t *keys;
797 uint64_t hva;
798 int i, r = 0;
799
800 if (args->flags != 0)
801 return -EINVAL;
802
803 /* Enforce sane limit on memory allocation */
804 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
805 return -EINVAL;
806
807 keys = kmalloc_array(args->count, sizeof(uint8_t),
808 GFP_KERNEL | __GFP_NOWARN);
809 if (!keys)
810 keys = vmalloc(sizeof(uint8_t) * args->count);
811 if (!keys)
812 return -ENOMEM;
813
814 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
815 sizeof(uint8_t) * args->count);
816 if (r) {
817 r = -EFAULT;
818 goto out;
819 }
820
821 /* Enable storage key handling for the guest */
822 s390_enable_skey();
823
824 for (i = 0; i < args->count; i++) {
825 hva = gfn_to_hva(kvm, args->start_gfn + i);
826 if (kvm_is_error_hva(hva)) {
827 r = -EFAULT;
828 goto out;
829 }
830
831 /* Lowest order bit is reserved */
832 if (keys[i] & 0x01) {
833 r = -EINVAL;
834 goto out;
835 }
836
837 r = set_guest_storage_key(current->mm, hva,
838 (unsigned long)keys[i], 0);
839 if (r)
840 goto out;
841 }
842out:
843 kvfree(keys);
844 return r;
845}
846
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100847long kvm_arch_vm_ioctl(struct file *filp,
848 unsigned int ioctl, unsigned long arg)
849{
850 struct kvm *kvm = filp->private_data;
851 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200852 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100853 int r;
854
855 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100856 case KVM_S390_INTERRUPT: {
857 struct kvm_s390_interrupt s390int;
858
859 r = -EFAULT;
860 if (copy_from_user(&s390int, argp, sizeof(s390int)))
861 break;
862 r = kvm_s390_inject_vm(kvm, &s390int);
863 break;
864 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200865 case KVM_ENABLE_CAP: {
866 struct kvm_enable_cap cap;
867 r = -EFAULT;
868 if (copy_from_user(&cap, argp, sizeof(cap)))
869 break;
870 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
871 break;
872 }
Cornelia Huck84223592013-07-15 13:36:01 +0200873 case KVM_CREATE_IRQCHIP: {
874 struct kvm_irq_routing_entry routing;
875
876 r = -EINVAL;
877 if (kvm->arch.use_irqchip) {
878 /* Set up dummy routing. */
879 memset(&routing, 0, sizeof(routing));
880 kvm_set_irq_routing(kvm, &routing, 0, 0);
881 r = 0;
882 }
883 break;
884 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200885 case KVM_SET_DEVICE_ATTR: {
886 r = -EFAULT;
887 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
888 break;
889 r = kvm_s390_vm_set_attr(kvm, &attr);
890 break;
891 }
892 case KVM_GET_DEVICE_ATTR: {
893 r = -EFAULT;
894 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
895 break;
896 r = kvm_s390_vm_get_attr(kvm, &attr);
897 break;
898 }
899 case KVM_HAS_DEVICE_ATTR: {
900 r = -EFAULT;
901 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
902 break;
903 r = kvm_s390_vm_has_attr(kvm, &attr);
904 break;
905 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400906 case KVM_S390_GET_SKEYS: {
907 struct kvm_s390_skeys args;
908
909 r = -EFAULT;
910 if (copy_from_user(&args, argp,
911 sizeof(struct kvm_s390_skeys)))
912 break;
913 r = kvm_s390_get_skeys(kvm, &args);
914 break;
915 }
916 case KVM_S390_SET_SKEYS: {
917 struct kvm_s390_skeys args;
918
919 r = -EFAULT;
920 if (copy_from_user(&args, argp,
921 sizeof(struct kvm_s390_skeys)))
922 break;
923 r = kvm_s390_set_skeys(kvm, &args);
924 break;
925 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100926 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300927 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100928 }
929
930 return r;
931}
932
/*
 * Query the Adjunct Processor (AP) configuration via PQAP(QCI).
 *
 * @config: 128-byte buffer receiving the query result; zeroed up front
 *          so the buffer holds defined data even if the instruction
 *          does not complete.
 *
 * Returns the condition code of the instruction (0 on success).
 * NOTE(review): the EX_TABLE fixup resumes at label 1 without setting
 * cc, so on a program check cc stays at its preset 0 and the caller
 * sees a "successful" all-zero config — confirm this is intended.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* function code in register 0 selecting the QCI subfunction */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"			/* extract cc from the PSW */
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
954
955static int kvm_s390_apxa_installed(void)
956{
957 u8 config[128];
958 int cc;
959
960 if (test_facility(2) && test_facility(12)) {
961 cc = kvm_s390_query_ap_config(config);
962
963 if (cc)
964 pr_err("PQAP(QCI) failed with cc=%d", cc);
965 else
966 return config[0] & 0x40;
967 }
968
969 return 0;
970}
971
972static void kvm_s390_set_crycb_format(struct kvm *kvm)
973{
974 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
975
976 if (kvm_s390_apxa_installed())
977 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
978 else
979 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
980}
981
/*
 * Fill @cpu_id with the host CPU id, forcing the version field to 0xff.
 * NOTE(review): 0xff presumably marks the CPU as virtualized — confirm
 * against the CPU-model users of this id.
 */
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
987
/*
 * Allocate and initialize the guest crypto control block (CRYCB).
 *
 * Succeeds trivially when the guest lacks facility 76.  Returns 0 on
 * success, -ENOMEM on allocation failure; the caller's error path
 * (kvm_arch_init_vm) frees the CRYCB.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	/*
	 * GFP_DMA keeps the CRYCB low in memory: its address is stored
	 * in the 32-bit crycbd field (see kvm_s390_set_crycb_format).
	 */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
1010
/*
 * Architecture-specific VM creation: validate the VM @type, allocate
 * the SCA, debug feature, facility page, crypto block and (for regular
 * VMs) the guest address space, and initialize interrupt state.
 *
 * Returns 0 on success, -EINVAL for a bad @type or missing privilege,
 * -ENOMEM (or the s390_enable_sie() result) otherwise.  On error all
 * partially-set-up resources are released; the free/unregister calls
 * tolerate NULL/unset pointers.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol flag is valid, and it needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger successive SCAs within their page by 16-byte steps
	 * (offset cycles through 0..0x7f0).  NOTE(review): presumably to
	 * spread cache-line usage across VMs — confirm.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially: host facilities ANDed
	 * with the facilities KVM is willing to expose. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs: each vcpu allocates its own gmap later */
		kvm->arch.gmap = NULL;
	} else {
		/* guest address space limited to 2^44 bytes */
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}
1113
/*
 * Tear down one vcpu: clear pending local interrupts and async page
 * faults, detach the vcpu from the SCA, release its gmap (ucontrol
 * only), CMMA state and SIE block, then free the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this cpu from the SCA's cpu-notification mask */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* only clear the sda entry if it still points at us */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
1139
/*
 * Destroy every vcpu of @kvm and reset the vcpu bookkeeping.  The
 * online_vcpus counter and the vcpus[] array are cleared under
 * kvm->lock.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
1155
/*
 * Architecture-specific VM teardown: mirrors the allocations done in
 * kvm_arch_init_vm (vcpus, facility page, SCA, debug feature, CRYCB,
 * gmap) and clears adapters and floating interrupts.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	/* ucontrol VMs never allocated a VM-wide gmap */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
1168
1169/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001170static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1171{
1172 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1173 if (!vcpu->arch.gmap)
1174 return -ENOMEM;
1175 vcpu->arch.gmap->private = vcpu->kvm;
1176
1177 return 0;
1178}
1179
/*
 * Early per-vcpu initialization: declare which register sets are
 * synchronized through the kvm_run area and, for ucontrol VMs, set up
 * the private gmap.  Returns 0 or the __kvm_ucontrol_vcpu_init result.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	/* vector registers are only synced when facility 129 is present */
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
1198
/*
 * Scheduled-in hook: save the host's FP/vector and access registers,
 * load the guest's, enable the guest address space and flag the vcpu
 * as running.  With facility 129 (vector facility, see the vx register
 * handling) the full vector state is switched instead of the FP regs.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/* guest FP/VX state lives in the sync'd kvm_run area */
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
1218
/*
 * Scheduled-out hook: the inverse of kvm_arch_vcpu_load — clear the
 * running flag, disable the guest address space, save the guest's
 * FP/vector and access registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1238
/*
 * Bring a vcpu into its initial-reset state: zero PSW, prefix, timers
 * and control registers, drop pending async page faults and local
 * interrupts, and (unless user space manages cpu state itself) stop
 * the cpu.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* NOTE(review): 0xE0/0xC2000000 are the architected reset values
	 * for CR0/CR14 — confirm against the Principles of Operation. */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the host's FPC so a later save doesn't leak state */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1261
/*
 * Late vcpu creation hook: inherit the VM-wide TOD epoch (under
 * kvm->lock so it can't change concurrently) and, for regular VMs,
 * attach the shared gmap (ucontrol vcpus got a private one earlier).
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
1270
/*
 * Propagate the VM's crypto configuration into this vcpu's SIE block:
 * AES/DEA protected-key bits in ecb3 plus the CRYCB descriptor.
 * No-op when the guest lacks facility 76.
 */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	/* start from a clean slate, then enable what the VM has on */
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
1285
/*
 * Release the collaborative-memory-management resources of a vcpu:
 * free the CBRL origin page and clear the SIE block reference.
 */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
1291
/*
 * Set up collaborative memory management for a vcpu: allocate the
 * CBRL origin page and adjust the ecb2 SIE controls.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* NOTE(review): 0x80 appears to enable CMMA interpretation and
	 * 0x08 is cleared alongside it — confirm bit names vs. the SIE
	 * control block definition. */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1302
/*
 * Copy the VM-wide CPU model (cpu id, IBC value, facility list address)
 * into this vcpu's state and SIE block.
 */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	/* fac holds the 31-bit address of the guest facility list */
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}
1311
/*
 * Configure the vcpu's SIE block: cpuflags, execution controls (ecb*,
 * eca, ecd), interception controls, CPU model, CMMA, the clock
 * comparator timer and crypto state.  Returns 0 on success or the
 * error from CMMA setup.
 *
 * NOTE(review): the raw hex values written to ecb/ecb2/eca/ecd are
 * SIE control bits without symbolic names here — confirm meanings
 * against the SIE control block layout before changing any of them.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* vcpu starts in z/Arch mode, stopped */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* prefer GED2 (facility 78) over GED (facility 8) */
	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* facility 129: enable vector support in eca/ecd */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	/* intercept the storage key instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1355
1356struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1357 unsigned int id)
1358{
Carsten Otte4d475552011-10-18 12:27:12 +02001359 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001360 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001361 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001362
Carsten Otte4d475552011-10-18 12:27:12 +02001363 if (id >= KVM_MAX_VCPUS)
1364 goto out;
1365
1366 rc = -ENOMEM;
1367
Michael Muellerb110fea2013-06-12 13:54:54 +02001368 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001369 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001370 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001371
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001372 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1373 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001374 goto out_free_cpu;
1375
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001376 vcpu->arch.sie_block = &sie_page->sie_block;
1377 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
Eric Farman68c55752014-06-09 10:57:26 -04001378 vcpu->arch.host_vregs = &sie_page->vregs;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001379
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001380 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001381 if (!kvm_is_ucontrol(kvm)) {
1382 if (!kvm->arch.sca) {
1383 WARN_ON_ONCE(1);
1384 goto out_free_cpu;
1385 }
1386 if (!kvm->arch.sca->cpu[id].sda)
1387 kvm->arch.sca->cpu[id].sda =
1388 (__u64) vcpu->arch.sie_block;
1389 vcpu->arch.sie_block->scaoh =
1390 (__u32)(((__u64)kvm->arch.sca) >> 32);
1391 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1392 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1393 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001394
Carsten Otteba5c1e92008-03-25 18:47:26 +01001395 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001396 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001397 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001398 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001399
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001400 rc = kvm_vcpu_init(vcpu, kvm, id);
1401 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001402 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001403 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1404 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001405 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001406
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001407 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001408out_free_sie_block:
1409 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001410out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001411 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001412out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001413 return ERR_PTR(rc);
1414}
1415
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1420
/*
 * Block a vcpu from (re)entering SIE and kick it out of SIE if it is
 * currently executing there.  Undone by kvm_s390_vcpu_unblock.
 */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1426
/* Allow a vcpu blocked by kvm_s390_vcpu_block to enter SIE again. */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1431
/*
 * Flag a pending request in the SIE block and force the vcpu out of
 * SIE so the request is noticed before the next guest instruction.
 */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1437
/* Clear the request flag once the vcpu has processed its requests. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
1442
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the SIE interception has actually happened */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1453
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	/* set the generic KVM request first, then force a SIE exit */
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
1460
/*
 * gmap invalidation callback: when a guest page is unmapped, any vcpu
 * whose prefix area overlaps that page must reload its MMU mapping.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
1475
/*
 * s390 uses exit_sie()/SIE flags for kicking vcpus instead of IPIs,
 * so this generic hook must never be reached.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1482
/*
 * KVM_GET_ONE_REG: copy a single s390 register out to user space.
 * Returns 0 on success, -EFAULT from put_user, or -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1531
/*
 * KVM_SET_ONE_REG: copy a single s390 register in from user space.
 * Returns 0 on success, -EFAULT from get_user, or -EINVAL for an
 * unknown register id.  Invalidating the pfault token also drains the
 * async page fault queue, keeping both pieces of state consistent.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001582
/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial CPU reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1588
/* KVM_SET_REGS: copy all 16 general purpose registers from userspace. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	/* gprs live in the shared kvm_run area (sync_regs scheme) */
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1594
/* KVM_GET_REGS: copy all 16 general purpose registers to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1600
/* KVM_SET_SREGS: set access and control registers of the guest cpu. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	/*
	 * Access regs are kept lazily in the host registers while the vcpu
	 * is loaded, so propagate the new values to hardware right away.
	 */
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1609
/* KVM_GET_SREGS: read access and control registers of the guest cpu. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1617
/*
 * KVM_SET_FPU: set the guest floating point registers and fp control.
 * Rejects an fpc value with invalid bits (-EINVAL).
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/*
	 * FP state is kept lazily in the host registers while the vcpu is
	 * loaded - push the new values into hardware immediately.
	 */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1628
/* KVM_GET_FPU: read the guest floating point registers and fp control. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1635
1636static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1637{
1638 int rc = 0;
1639
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001640 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001641 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001642 else {
1643 vcpu->run->psw_mask = psw.mask;
1644 vcpu->run->psw_addr = psw.addr;
1645 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001646 return rc;
1647}
1648
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1654
/* Guest-debug control flags userspace may request; anything else -> -EINVAL */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable or disable debugging of the guest, optionally
 * importing hardware breakpoints.  On any failure the vcpu is left with
 * debugging fully disabled.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate, then apply the requested settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoint data failed - disable debugging again */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1690
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001691int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1692 struct kvm_mp_state *mp_state)
1693{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001694 /* CHECK_STOP and LOAD are not supported yet */
1695 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1696 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001697}
1698
/*
 * KVM_SET_MP_STATE: move the vcpu into the requested multiprocessing state.
 * Only STOPPED and OPERATING are implemented; CHECK_STOP and LOAD yield
 * -ENXIO.  Using this interface hands cpu state control to userspace.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1723
Dominik Dingelb31605c2014-03-25 13:47:11 +01001724bool kvm_s390_cmma_enabled(struct kvm *kvm)
1725{
1726 if (!MACHINE_IS_LPAR)
1727 return false;
1728 /* only enable for z10 and later */
1729 if (!MACHINE_HAS_EDAT1)
1730 return false;
1731 if (!kvm->arch.use_cmma)
1732 return false;
1733 return true;
1734}
1735
David Hildenbrand8ad35752014-03-14 11:00:21 +01001736static bool ibs_enabled(struct kvm_vcpu *vcpu)
1737{
1738 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1739}
1740
/*
 * Process all outstanding vcpu requests before (re)entering SIE.
 * Each handled request restarts the loop, so a request raised while we
 * were handling another one is not missed.  Returns 0 or a negative
 * error that aborts guest entry.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		/* the prefix area covers two pages */
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the cpu id cached in the SIE block */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1792
Thomas Huthfa576c52014-05-06 17:20:16 +02001793/**
1794 * kvm_arch_fault_in_page - fault-in guest page if necessary
1795 * @vcpu: The corresponding virtual cpu
1796 * @gpa: Guest physical address
1797 * @writable: Whether the page should be writable or not
1798 *
1799 * Make sure that a guest page has been faulted-in on the host.
1800 *
1801 * Return: Zero on success, negative error code otherwise.
1802 */
1803long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001804{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001805 return gmap_fault(vcpu->arch.gmap, gpa,
1806 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001807}
1808
Dominik Dingel3c038e62013-10-07 17:11:48 +02001809static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1810 unsigned long token)
1811{
1812 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02001813 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001814
1815 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02001816 irq.u.ext.ext_params2 = token;
1817 irq.type = KVM_S390_INT_PFAULT_INIT;
1818 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02001819 } else {
1820 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02001821 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001822 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1823 }
1824}
1825
/* Async-pf callback: tell the guest that a page is not yet present. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1832
/* Async-pf callback: tell the guest that the page is now present. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1839
/* No extra work needed on s390 when the faulted-in page becomes ready. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1845
/* Always report "can inject" so the generic cleanup path runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1854
/*
 * Try to arm an async page fault for the address the guest just faulted on
 * (current->thread.gmap_addr).  A chain of architectural gates must all
 * pass; if any fails we return 0 and the caller falls back to a
 * synchronous fault-in.  Returns non-zero when the async pf was queued.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handshake must be enabled via the DIAG 258 token */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* PSW mask must match the guest-selected compare value */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* external interrupt subclass for pfault must be enabled in CR0 */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* fetch the 8-byte token the guest stored for this handshake */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1883
/*
 * Prepare the vcpu for the next SIE entry: handle async-pf housekeeping,
 * pending machine checks, interrupt delivery, vcpu requests and guest
 * debugging setup.  Returns 0 when the guest may be entered, or a
 * negative error/exit reason otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 in the SIE block shadow gprs 14 and 15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests deliver their interrupts from userspace */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1925
/*
 * SIE itself faulted while accessing guest memory: forward the condition
 * to the guest as an addressing exception, advancing the PSW past the
 * faulting instruction first.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* rewind by a negative amount == step forward over the insn */
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
1950
/*
 * Post-process a SIE exit.  @exit_reason >= 0 means a regular intercept;
 * negative values indicate a host-side fault during SIE which is resolved
 * here (ucontrol exit, async pf, synchronous fault-in, or an addressing
 * exception injected into the guest).
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	/*
	 * -1 == "fault not yet handled".
	 * NOTE(review): -1 aliases -EPERM; relies on no path below
	 * producing a genuine -EPERM - confirm before reusing the pattern.
	 */
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let userspace resolve the fault for ucontrol guests */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			/* async pf not possible - fault in synchronously */
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	/* copy the shadowed gprs 14/15 back to the run area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1997
/*
 * Main vcpu run loop: alternate between host-side preparation and SIE
 * execution until a signal, a guest-debug exit, or an error/userspace
 * exit condition stops the loop.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs - SIE may block a long time */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2032
/*
 * Copy the register state userspace dirtied in kvm_run into the vcpu
 * before entering the guest (KVM_SYNC_* scheme).
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token disables pfault - drop stale completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
2060
/* Mirror the vcpu register state back into kvm_run for userspace. */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2076
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002077int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2078{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002079 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002080 sigset_t sigsaved;
2081
David Hildenbrand27291e22014-01-23 12:26:52 +01002082 if (guestdbg_exit_pending(vcpu)) {
2083 kvm_s390_prepare_debug_exit(vcpu);
2084 return 0;
2085 }
2086
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002087 if (vcpu->sigset_active)
2088 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2089
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002090 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2091 kvm_s390_vcpu_start(vcpu);
2092 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002093 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002094 vcpu->vcpu_id);
2095 return -EINVAL;
2096 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002097
David Hildenbrandb028ee32014-07-17 10:47:43 +02002098 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002099
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002100 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002101 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002102
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002103 if (signal_pending(current) && !rc) {
2104 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002105 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002106 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002107
David Hildenbrand27291e22014-01-23 12:26:52 +01002108 if (guestdbg_exit_pending(vcpu) && !rc) {
2109 kvm_s390_prepare_debug_exit(vcpu);
2110 rc = 0;
2111 }
2112
Heiko Carstensb8e660b2010-02-26 22:37:41 +01002113 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002114 /* intercept cannot be handled in-kernel, prepare kvm-run */
2115 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2116 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002117 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2118 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2119 rc = 0;
2120 }
2121
2122 if (rc == -EREMOTE) {
2123 /* intercept was handled, but userspace support is needed
2124 * kvm_run has been prepared by the handler */
2125 rc = 0;
2126 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002127
David Hildenbrandb028ee32014-07-17 10:47:43 +02002128 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002129
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002130 if (vcpu->sigset_active)
2131 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2132
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002133 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002134 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002135}
2136
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (fp regs, gprs, psw, prefix, fp control,
 * TOD programmable reg, cpu timer, clock comparator, access regs, control
 * regs) to guest absolute memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* byte 163: architectural mode indicator */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* rc accumulates failures; any non-zero result maps to -EFAULT */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the architecture stores bits 0-55 of the clock comparator */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2184
/*
 * Store status for a currently-loaded vcpu: refresh the software copies
 * of the lazily-kept register state first, then store to guest memory.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2198
/*
 * store additional status at address
 *
 * Writes the vector registers (512 bytes) to the 1K-aligned guest
 * absolute address in @gpa.  Returns 0 or a write_guest_abs() error.
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	/* NOTE(review): an all-zero address portion appears to mean
	 * "no additional status requested" - confirm against callers. */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
2212
/*
 * Store additional (vector) status for a loaded vcpu.  A no-op when the
 * guest does not have the vector facility (STFLE bit 129).
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2227
/* Revoke any pending ENABLE request, then ask the vcpu to drop IBS. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
2233
2234static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2235{
2236 unsigned int i;
2237 struct kvm_vcpu *vcpu;
2238
2239 kvm_for_each_vcpu(i, vcpu, kvm) {
2240 __disable_ibs_on_vcpu(vcpu);
2241 }
2242}
2243
/* Revoke any pending DISABLE request, then ask the vcpu to enable IBS. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
2249
/*
 * Move a stopped vcpu into the operating state, maintaining the IBS
 * (single-running-cpu speedup) invariant: IBS may only be enabled while
 * exactly one vcpu is running.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count how many other vcpus are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2288
2289void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2290{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002291 int i, online_vcpus, started_vcpus = 0;
2292 struct kvm_vcpu *started_vcpu = NULL;
2293
2294 if (is_vcpu_stopped(vcpu))
2295 return;
2296
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002297 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002298 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002299 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002300 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2301
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002302 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002303 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002304
David Hildenbrand6cddd432014-10-15 16:48:53 +02002305 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002306 __disable_ibs_on_vcpu(vcpu);
2307
2308 for (i = 0; i < online_vcpus; i++) {
2309 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2310 started_vcpus++;
2311 started_vcpu = vcpu->kvm->vcpus[i];
2312 }
2313 }
2314
2315 if (started_vcpus == 1) {
2316 /*
2317 * As we only have one VCPU left, we want to enable the
2318 * IBS facility for that VCPU to speed it up.
2319 */
2320 __enable_ibs_on_vcpu(started_vcpu);
2321 }
2322
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002323 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002324 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002325}
2326
Cornelia Huckd6712df2012-12-20 15:32:11 +01002327static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2328 struct kvm_enable_cap *cap)
2329{
2330 int r;
2331
2332 if (cap->flags)
2333 return -EINVAL;
2334
2335 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002336 case KVM_CAP_S390_CSS_SUPPORT:
2337 if (!vcpu->kvm->arch.css_support) {
2338 vcpu->kvm->arch.css_support = 1;
2339 trace_kvm_s390_enable_css(vcpu->kvm);
2340 }
2341 r = 0;
2342 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002343 default:
2344 r = -EINVAL;
2345 break;
2346 }
2347 return r;
2348}
2349
/*
 * Handle the KVM_S390_MEM_OP ioctl: read or write guest logical memory
 * through a kernel bounce buffer, or — with KVM_S390_MEMOP_F_CHECK_ONLY —
 * only verify that the guest address range is accessible.  With
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION, a guest access error (positive
 * return from the guest-access helpers) is additionally injected into
 * the guest as a program interrupt.
 *
 * Returns 0 on success, a positive access-error code, or a negative
 * errno (-EINVAL, -E2BIG, -ENOMEM, -EFAULT).
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	/* Reject any flag we do not understand. */
	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* A check-only operation moves no data, so no bounce buffer. */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* Guest memory access requires holding the memslot srcu read lock. */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		/* Read guest memory, then copy the result out to userspace. */
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		/* Copy the data in from userspace, then write it to the guest. */
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* A positive r encodes a guest access exception -> inject it. */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2408
/*
 * Dispatch VCPU-level ioctls: interrupt injection (legacy and extended
 * formats), status store, initial PSW/reset, one-reg access, ucontrol
 * address-space mapping, guest memory operations and irq state transfer.
 * Returns a non-negative result on success, a negative errno on failure,
 * and -ENOTTY for unknown ioctl numbers.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		/* Inject an interrupt given in the extended irq format. */
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/*
		 * Legacy interrupt format: convert to the extended
		 * kvm_s390_irq representation before injecting.
		 */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* Storing status accesses guest memory -> srcu read lock. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		/* Both directions share the copy-in; branch on the ioctl. */
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/*
		 * Map a user address range into the guest address space of
		 * a user-controlled VM; only valid for ucontrol guests.
		 */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* Undo a previous KVM_S390_UCAS_MAP. */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Pre-fault the guest mapping for the given address. */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* Reject empty, oversized or misaligned irq buffers. */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2563
Carsten Otte5b1c1492012-01-04 10:25:23 +01002564int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2565{
2566#ifdef CONFIG_KVM_S390_UCONTROL
2567 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2568 && (kvm_is_ucontrol(vcpu->kvm))) {
2569 vmf->page = virt_to_page(vcpu->arch.sie_block);
2570 get_page(vmf->page);
2571 return 0;
2572 }
2573#endif
2574 return VM_FAULT_SIGBUS;
2575}
2576
/*
 * Arch hook for memslot creation.  s390 keeps no per-slot arch metadata,
 * so there is nothing to allocate here.
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2582
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002583/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002584int kvm_arch_prepare_memory_region(struct kvm *kvm,
2585 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002586 struct kvm_userspace_memory_region *mem,
2587 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002588{
Nick Wangdd2887e2013-03-25 17:22:57 +01002589 /* A few sanity checks. We can have memory slots which have to be
2590 located/ended at a segment boundary (1MB). The memory in userland is
2591 ok to be fragmented into various different vmas. It is okay to mmap()
2592 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002593
Carsten Otte598841c2011-07-24 10:48:21 +02002594 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002595 return -EINVAL;
2596
Carsten Otte598841c2011-07-24 10:48:21 +02002597 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002598 return -EINVAL;
2599
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002600 return 0;
2601}
2602
2603void kvm_arch_commit_memory_region(struct kvm *kvm,
2604 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002605 const struct kvm_memory_slot *old,
2606 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002607{
Carsten Ottef7850c92011-07-24 10:48:23 +02002608 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002609
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002610 /* If the basics of the memslot do not change, we do not want
2611 * to update the gmap. Every update causes several unnecessary
2612 * segment translation exceptions. This is usually handled just
2613 * fine by the normal fault handler + gmap, but it will also
2614 * cause faults on the prefix page of running guest CPUs.
2615 */
2616 if (old->userspace_addr == mem->userspace_addr &&
2617 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2618 old->npages * PAGE_SIZE == mem->memory_size)
2619 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002620
2621 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2622 mem->guest_phys_addr, mem->memory_size);
2623 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002624 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002625 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002626}
2627
/* Module entry point: register with the generic KVM core. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2632
/* Module exit point: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2637
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");