/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

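/*
 * Each entry below maps a debugfs file name to the offset of a counter
 * in struct kvm_vcpu (via VCPU_STAT above); the generic KVM debugfs
 * code reads and exposes the counters through these offsets.
 */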
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
        0xff82fffbf4fc2000UL,
        0x005c000000000000UL,
};
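
/*
 * Everything past these two 64-bit words is masked off in
 * kvm_arch_init_vm, so facility bits the host offers beyond this
 * limit are never reported to guests.
 */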

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
                r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = MACHINE_HAS_VX;
                break;
        default:
                r = 0;
        }
        return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages; last_gfn is one past the end */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac->mask, 129);
                        set_kvm_facility(kvm->arch.model.fac->list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
                break;
        case KVM_CAP_S390_USER_STSI:
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}


static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (new_limit > kvm->arch.gmap->asce_end)
                        return -E2BIG;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

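/*
 * Guest TOD handling: the SIE block stores only an epoch difference,
 * so the guest clock reads as (host TOD + kvm->arch.epoch).  Setting
 * the guest TOD thus recomputes the epoch and propagates it to every
 * vcpu; only a zero high word is accepted, since the epoch is 64 bit.
 */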
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *cur_vcpu;
        unsigned int vcpu_idx;
        u64 host_tod, gtod;
        int r;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
                exit_sie(cur_vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                         sizeof(gtod_high)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 host_tod, gtod;
        int r;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        gtod = host_tod + kvm->arch.epoch;
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
                memcpy(kvm->arch.model.fac->list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp_get_ibc();
        memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

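/*
 * The storage-key ioctls below move the keys through a kernel buffer
 * that is kmalloc'ed when possible and vmalloc'ed as a fallback for
 * large counts (KVM_S390_SKEYS_MAX bounds the allocation either way).
 */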
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        unsigned long curkey;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Is this guest using storage keys? */
        if (!mm_use_skey(current->mm))
                return KVM_S390_GET_SKEYS_NONE;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                curkey = get_guest_storage_key(current->mm, hva);
                if (IS_ERR_VALUE(curkey)) {
                        r = curkey;
                        goto out;
                }
                keys[i] = curkey;
        }

        r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
                         sizeof(uint8_t) * args->count);
        if (r)
                r = -EFAULT;
out:
        kvfree(keys);
        return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
                           sizeof(uint8_t) * args->count);
        if (r) {
                r = -EFAULT;
                goto out;
        }

        /* Enable storage key handling for the guest */
        s390_enable_skey();

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                /* Lowest order bit is reserved */
                if (keys[i] & 0x01) {
                        r = -EINVAL;
                        goto out;
                }

                r = set_guest_storage_key(current->mm, hva,
                                          (unsigned long)keys[i], 0);
                if (r)
                        goto out;
        }
out:
        kvfree(keys);
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        case KVM_S390_GET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_get_skeys(kvm, &args);
                break;
        }
        case KVM_S390_SET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_set_skeys(kvm, &args);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

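/*
 * Query the AP (adjunct processor) configuration: ".long 0xb2af0000"
 * below is the PQAP instruction, with what is presumably the QCI
 * function code (0x04) in register 0 and the 128-byte response buffer
 * in register 2.  The EX_TABLE fixup lets the query fail benignly on
 * machines that reject the instruction: cc stays 0 and the config
 * buffer stays zeroed.
 */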
static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc = 0;

        memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "0: ipm %0\n"
                "srl %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(2) && test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
        get_cpu_id(cpu_id);
        cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm_s390_set_crycb_format(kvm);

        /* Enable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 1;
        kvm->arch.crypto.dea_kw = 1;
        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
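        /*
         * Stagger each VM's SCA inside its page in 16-byte steps
         * (sca_offset cycles through 0..0x7f0), presumably so the
         * hardware-accessed SCA blocks of different guests do not all
         * land on the same cache lines.
         */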
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_err;

        /*
         * The architectural maximum amount of facilities is 16 kbit. To store
         * this amount, 2 kbyte of memory is required. Thus we need a full
         * page to hold the guest facility list (arch.model.fac->list) and the
         * facility mask (arch.model.fac->mask). Its address size has to be
         * 31 bits and word aligned.
         */
        kvm->arch.model.fac =
                (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_err;

        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac->mask[i] = 0UL;
        }

        /* Populate the facility list initially. */
        memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);

        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_err;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_err;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_err:
        kfree(kvm->arch.crypto.crycb);
        free_page((unsigned long)kvm->arch.model.fac);
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 129))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        if (test_kvm_facility(vcpu->kvm, 129))
                save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
        else
                save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        if (test_kvm_facility(vcpu->kvm, 129)) {
                restore_fp_ctl(&vcpu->run->s.regs.fpc);
                restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
        } else {
                restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
                restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        }
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        if (test_kvm_facility(vcpu->kvm, 129)) {
                save_fp_ctl(&vcpu->run->s.regs.fpc);
                save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
        } else {
                save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
                save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        }
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        if (test_kvm_facility(vcpu->kvm, 129))
                restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
        else
                restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_kvm_facility(vcpu->kvm, 76))
                return;

        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

        if (vcpu->kvm->arch.crypto.aes_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

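        /*
         * Presumably: ecb2 bit 0x80 enables CMMA interpretation, while
         * clearing bit 0x08 turns off hardware PFMF interpretation so
         * that PFMF is intercepted and handled by KVM.
         */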
        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

        vcpu->arch.cpu_id = model->cpu_id;
        vcpu->arch.sie_block->ibc = model->ibc;
        vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        kvm_s390_vcpu_setup_model(vcpu);

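        /*
         * The magic numbers below are execution-control bits of the
         * SIE block.  From the surrounding gating code: ecb |= 0x10
         * enables transactional execution (facilities 50 and 73),
         * eca bit 1 enables SIIF, eca 0x10000000 SIGP interpretation,
         * and eca 0x00020000 together with ecd 0x20000000 the vector
         * facility (129).  The base values ecb = 6 and eca =
         * 0xC1002000U are taken as given here.
         */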
        vcpu->arch.sie_block->ecb = 6;
        if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        if (sclp_has_sigpif())
                vcpu->arch.sie_block->eca |= 0x10000000U;
        if (test_kvm_facility(vcpu->kvm, 129)) {
                vcpu->arch.sie_block->eca |= 0x00020000;
                vcpu->arch.sie_block->ecd |= 0x20000000;
        }
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
        vcpu->arch.host_vregs = &sie_page->vregs;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

Heiko Carstensb0c632d2008-03-25 18:47:20 +01001401int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1402{
David Hildenbrand9a022062014-08-05 17:40:47 +02001403 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001404}
1405
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001406void s390_vcpu_block(struct kvm_vcpu *vcpu)
1407{
1408 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1409}
1410
1411void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1412{
1413 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1414}
1415
1416/*
1417 * Kick a guest cpu out of SIE and wait until SIE is not running.
 1418 * If the CPU is not running (e.g. waiting while idle), the function
 1419 * returns immediately. */
1420void exit_sie(struct kvm_vcpu *vcpu)
1421{
1422 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1423 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1424 cpu_relax();
1425}
1426
1427/* Kick a guest cpu out of SIE and prevent SIE-reentry */
1428void exit_sie_sync(struct kvm_vcpu *vcpu)
1429{
1430 s390_vcpu_block(vcpu);
1431 exit_sie(vcpu);
1432}
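/*
 * Illustrative note (added, not part of the original source): the usual
 * pattern for safely modifying the state of a possibly running VCPU is to
 * queue a request and then force the VCPU out of SIE, e.g.:
 *
 *	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 *	exit_sie_sync(vcpu);
 *
 * The VCPU then picks the request up in kvm_s390_handle_requests() on its
 * next entry attempt; kvm_gmap_notifier() below does exactly this.
 */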
1433
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001434static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1435{
1436 int i;
1437 struct kvm *kvm = gmap->private;
1438 struct kvm_vcpu *vcpu;
1439
1440 kvm_for_each_vcpu(i, vcpu, kvm) {
1441 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +02001442 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001443 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1444 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
1445 exit_sie_sync(vcpu);
1446 }
1447 }
1448}
1449
Christoffer Dallb6d33832012-03-08 16:44:24 -05001450int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1451{
1452 /* kvm common code refers to this, but never calls it */
1453 BUG();
1454 return 0;
1455}
1456
Carsten Otte14eebd92012-05-15 14:15:26 +02001457static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1458 struct kvm_one_reg *reg)
1459{
1460 int r = -EINVAL;
1461
1462 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001463 case KVM_REG_S390_TODPR:
1464 r = put_user(vcpu->arch.sie_block->todpr,
1465 (u32 __user *)reg->addr);
1466 break;
1467 case KVM_REG_S390_EPOCHDIFF:
1468 r = put_user(vcpu->arch.sie_block->epoch,
1469 (u64 __user *)reg->addr);
1470 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001471 case KVM_REG_S390_CPU_TIMER:
1472 r = put_user(vcpu->arch.sie_block->cputm,
1473 (u64 __user *)reg->addr);
1474 break;
1475 case KVM_REG_S390_CLOCK_COMP:
1476 r = put_user(vcpu->arch.sie_block->ckc,
1477 (u64 __user *)reg->addr);
1478 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001479 case KVM_REG_S390_PFTOKEN:
1480 r = put_user(vcpu->arch.pfault_token,
1481 (u64 __user *)reg->addr);
1482 break;
1483 case KVM_REG_S390_PFCOMPARE:
1484 r = put_user(vcpu->arch.pfault_compare,
1485 (u64 __user *)reg->addr);
1486 break;
1487 case KVM_REG_S390_PFSELECT:
1488 r = put_user(vcpu->arch.pfault_select,
1489 (u64 __user *)reg->addr);
1490 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001491 case KVM_REG_S390_PP:
1492 r = put_user(vcpu->arch.sie_block->pp,
1493 (u64 __user *)reg->addr);
1494 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001495 case KVM_REG_S390_GBEA:
1496 r = put_user(vcpu->arch.sie_block->gbea,
1497 (u64 __user *)reg->addr);
1498 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001499 default:
1500 break;
1501 }
1502
1503 return r;
1504}
1505
1506static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1507 struct kvm_one_reg *reg)
1508{
1509 int r = -EINVAL;
1510
1511 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001512 case KVM_REG_S390_TODPR:
1513 r = get_user(vcpu->arch.sie_block->todpr,
1514 (u32 __user *)reg->addr);
1515 break;
1516 case KVM_REG_S390_EPOCHDIFF:
1517 r = get_user(vcpu->arch.sie_block->epoch,
1518 (u64 __user *)reg->addr);
1519 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001520 case KVM_REG_S390_CPU_TIMER:
1521 r = get_user(vcpu->arch.sie_block->cputm,
1522 (u64 __user *)reg->addr);
1523 break;
1524 case KVM_REG_S390_CLOCK_COMP:
1525 r = get_user(vcpu->arch.sie_block->ckc,
1526 (u64 __user *)reg->addr);
1527 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001528 case KVM_REG_S390_PFTOKEN:
1529 r = get_user(vcpu->arch.pfault_token,
1530 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001531 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1532 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02001533 break;
1534 case KVM_REG_S390_PFCOMPARE:
1535 r = get_user(vcpu->arch.pfault_compare,
1536 (u64 __user *)reg->addr);
1537 break;
1538 case KVM_REG_S390_PFSELECT:
1539 r = get_user(vcpu->arch.pfault_select,
1540 (u64 __user *)reg->addr);
1541 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001542 case KVM_REG_S390_PP:
1543 r = get_user(vcpu->arch.sie_block->pp,
1544 (u64 __user *)reg->addr);
1545 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001546 case KVM_REG_S390_GBEA:
1547 r = get_user(vcpu->arch.sie_block->gbea,
1548 (u64 __user *)reg->addr);
1549 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001550 default:
1551 break;
1552 }
1553
1554 return r;
1555}
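/*
 * Hedged userspace sketch (vcpu_fd is an assumption, not from this file):
 * reading one of the registers above through KVM_GET_ONE_REG:
 *
 *	__u64 cpu_timer;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cpu_timer,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 *
 * KVM_SET_ONE_REG works the same way, with the new value stored at .addr
 * before the call.
 */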
Christoffer Dallb6d33832012-03-08 16:44:24 -05001556
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001557static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1558{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001559 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001560 return 0;
1561}
1562
1563int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1564{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001565 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001566 return 0;
1567}
1568
1569int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1570{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001571 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001572 return 0;
1573}
1574
1575int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1576 struct kvm_sregs *sregs)
1577{
Christian Borntraeger59674c12012-01-11 11:20:33 +01001578 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001579 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christian Borntraeger59674c12012-01-11 11:20:33 +01001580 restore_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001581 return 0;
1582}
1583
1584int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1585 struct kvm_sregs *sregs)
1586{
Christian Borntraeger59674c12012-01-11 11:20:33 +01001587 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001588 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001589 return 0;
1590}
1591
1592int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1593{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02001594 if (test_fp_ctl(fpu->fpc))
1595 return -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001596 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Martin Schwidefsky4725c862013-10-15 16:08:34 +02001597 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1598 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1599 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001600 return 0;
1601}
1602
1603int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1604{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001605 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1606 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001607 return 0;
1608}
1609
1610static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1611{
1612 int rc = 0;
1613
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001614 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001615 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001616 else {
1617 vcpu->run->psw_mask = psw.mask;
1618 vcpu->run->psw_addr = psw.addr;
1619 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001620 return rc;
1621}
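/*
 * Hedged example (vcpu_fd and start_address are assumptions): userspace
 * sets the initial PSW while the VCPU is still stopped, matching the
 * is_vcpu_stopped() check above:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,	- EA and BA bits: 64-bit mode
 *		.addr = start_address,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 *
 * A running VCPU makes the ioctl fail with -EBUSY.
 */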
1622
1623int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1624 struct kvm_translation *tr)
1625{
1626 return -EINVAL; /* not implemented yet */
1627}
1628
David Hildenbrand27291e22014-01-23 12:26:52 +01001629#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1630 KVM_GUESTDBG_USE_HW_BP | \
1631 KVM_GUESTDBG_ENABLE)
1632
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001633int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1634 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001635{
David Hildenbrand27291e22014-01-23 12:26:52 +01001636 int rc = 0;
1637
1638 vcpu->guest_debug = 0;
1639 kvm_s390_clear_bp_data(vcpu);
1640
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02001641 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01001642 return -EINVAL;
1643
1644 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1645 vcpu->guest_debug = dbg->control;
1646 /* enforce guest PER */
1647 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1648
1649 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1650 rc = kvm_s390_import_bp_data(vcpu, dbg);
1651 } else {
1652 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1653 vcpu->arch.guestdbg.last_bp = 0;
1654 }
1655
1656 if (rc) {
1657 vcpu->guest_debug = 0;
1658 kvm_s390_clear_bp_data(vcpu);
1659 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1660 }
1661
1662 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001663}
1664
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001665int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1666 struct kvm_mp_state *mp_state)
1667{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001668 /* CHECK_STOP and LOAD are not supported yet */
1669 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1670 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001671}
1672
1673int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1674 struct kvm_mp_state *mp_state)
1675{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001676 int rc = 0;
1677
1678 /* user space knows about this interface - let it control the state */
1679 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1680
1681 switch (mp_state->mp_state) {
1682 case KVM_MP_STATE_STOPPED:
1683 kvm_s390_vcpu_stop(vcpu);
1684 break;
1685 case KVM_MP_STATE_OPERATING:
1686 kvm_s390_vcpu_start(vcpu);
1687 break;
1688 case KVM_MP_STATE_LOAD:
1689 case KVM_MP_STATE_CHECK_STOP:
1690 /* fall through - CHECK_STOP and LOAD are not supported yet */
1691 default:
1692 rc = -ENXIO;
1693 }
1694
1695 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001696}
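/*
 * Minimal userspace sketch (assumed vcpu_fd; not in the original source):
 * restarting a stopped VCPU through the MP state interface:
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_OPERATING };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 *
 * Note that any KVM_SET_MP_STATE call sets user_cpu_state_ctrl, so the
 * kernel stops auto-starting VCPUs in kvm_arch_vcpu_ioctl_run() afterwards.
 */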
1697
Dominik Dingelb31605c2014-03-25 13:47:11 +01001698bool kvm_s390_cmma_enabled(struct kvm *kvm)
1699{
1700 if (!MACHINE_IS_LPAR)
1701 return false;
1702 /* only enable for z10 and later */
1703 if (!MACHINE_HAS_EDAT1)
1704 return false;
1705 if (!kvm->arch.use_cmma)
1706 return false;
1707 return true;
1708}
1709
David Hildenbrand8ad35752014-03-14 11:00:21 +01001710static bool ibs_enabled(struct kvm_vcpu *vcpu)
1711{
1712 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1713}
1714
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001715static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1716{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001717retry:
1718 s390_vcpu_unblock(vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001719 /*
1720 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1721 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1722 * This ensures that the ipte instruction for this request has
1723 * already finished. We might race against a second unmapper that
 1724 * wants to set the blocking bit. Let's just retry the request loop.
1725 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01001726 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001727 int rc;
1728 rc = gmap_ipte_notify(vcpu->arch.gmap,
Michael Muellerfda902c2014-05-13 16:58:30 +02001729 kvm_s390_get_prefix(vcpu),
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001730 PAGE_SIZE * 2);
1731 if (rc)
1732 return rc;
David Hildenbrand8ad35752014-03-14 11:00:21 +01001733 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001734 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01001735
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001736 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1737 vcpu->arch.sie_block->ihcpu = 0xffff;
1738 goto retry;
1739 }
1740
David Hildenbrand8ad35752014-03-14 11:00:21 +01001741 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1742 if (!ibs_enabled(vcpu)) {
1743 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1744 atomic_set_mask(CPUSTAT_IBS,
1745 &vcpu->arch.sie_block->cpuflags);
1746 }
1747 goto retry;
1748 }
1749
1750 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1751 if (ibs_enabled(vcpu)) {
1752 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1753 atomic_clear_mask(CPUSTAT_IBS,
1754 &vcpu->arch.sie_block->cpuflags);
1755 }
1756 goto retry;
1757 }
1758
David Hildenbrand0759d062014-05-13 16:54:32 +02001759 /* nothing to do, just clear the request */
1760 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1761
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001762 return 0;
1763}
1764
Thomas Huthfa576c52014-05-06 17:20:16 +02001765/**
1766 * kvm_arch_fault_in_page - fault-in guest page if necessary
1767 * @vcpu: The corresponding virtual cpu
1768 * @gpa: Guest physical address
1769 * @writable: Whether the page should be writable or not
1770 *
1771 * Make sure that a guest page has been faulted-in on the host.
1772 *
1773 * Return: Zero on success, negative error code otherwise.
1774 */
1775long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001776{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001777 return gmap_fault(vcpu->arch.gmap, gpa,
1778 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001779}
1780
Dominik Dingel3c038e62013-10-07 17:11:48 +02001781static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1782 unsigned long token)
1783{
1784 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02001785 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001786
1787 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02001788 irq.u.ext.ext_params2 = token;
1789 irq.type = KVM_S390_INT_PFAULT_INIT;
1790 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02001791 } else {
1792 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02001793 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001794 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1795 }
1796}
1797
1798void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1799 struct kvm_async_pf *work)
1800{
1801 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1802 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1803}
1804
1805void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1806 struct kvm_async_pf *work)
1807{
1808 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1809 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1810}
1811
1812void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1813 struct kvm_async_pf *work)
1814{
1815 /* s390 will always inject the page directly */
1816}
1817
1818bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1819{
1820 /*
1821 * s390 will always inject the page directly,
 1822 * but we still want check_async_completion to clean up
1823 */
1824 return true;
1825}
1826
1827static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1828{
1829 hva_t hva;
1830 struct kvm_arch_async_pf arch;
1831 int rc;
1832
1833 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1834 return 0;
1835 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1836 vcpu->arch.pfault_compare)
1837 return 0;
1838 if (psw_extint_disabled(vcpu))
1839 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02001840 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02001841 return 0;
1842 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1843 return 0;
1844 if (!vcpu->arch.gmap->pfault_enabled)
1845 return 0;
1846
Heiko Carstens81480cc2014-01-01 16:36:07 +01001847 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1848 hva += current->thread.gmap_addr & ~PAGE_MASK;
1849 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02001850 return 0;
1851
1852 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1853 return rc;
1854}
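/*
 * Clarifying summary (added, not in the original): this is the guest side
 * of the pfault handshake. An async fault is only set up when the guest
 * has opted in (valid pfault_token), the current PSW matches the guest's
 * selection mask (pfault_select/pfault_compare), external interrupts are
 * enabled, no other interrupt is already pending, the gating CR0 subclass
 * bit (0x200) is set, and pfault is enabled on the gmap.
 */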
1855
Thomas Huth3fb4c402013-09-12 10:33:43 +02001856static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001857{
Thomas Huth3fb4c402013-09-12 10:33:43 +02001858 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01001859
Dominik Dingel3c038e62013-10-07 17:11:48 +02001860 /*
1861 * On s390 notifications for arriving pages will be delivered directly
 1862 * to the guest, but the housekeeping for completed pfaults is
1863 * handled outside the worker.
1864 */
1865 kvm_check_async_pf_completion(vcpu);
1866
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001867 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001868
1869 if (need_resched())
1870 schedule();
1871
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02001872 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02001873 s390_handle_mcck();
1874
Jens Freimann79395032014-04-17 10:10:30 +02001875 if (!kvm_is_ucontrol(vcpu->kvm)) {
1876 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1877 if (rc)
1878 return rc;
1879 }
Carsten Otte0ff31862008-05-21 13:37:37 +02001880
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001881 rc = kvm_s390_handle_requests(vcpu);
1882 if (rc)
1883 return rc;
1884
David Hildenbrand27291e22014-01-23 12:26:52 +01001885 if (guestdbg_enabled(vcpu)) {
1886 kvm_s390_backup_guest_per_regs(vcpu);
1887 kvm_s390_patch_guest_per_regs(vcpu);
1888 }
1889
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001890 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02001891 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1892 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1893 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02001894
Thomas Huth3fb4c402013-09-12 10:33:43 +02001895 return 0;
1896}
1897
Thomas Huth492d8642015-02-10 16:11:01 +01001898static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1899{
1900 psw_t *psw = &vcpu->arch.sie_block->gpsw;
1901 u8 opcode;
1902 int rc;
1903
1904 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1905 trace_kvm_s390_sie_fault(vcpu);
1906
1907 /*
1908 * We want to inject an addressing exception, which is defined as a
1909 * suppressing or terminating exception. However, since we came here
1910 * by a DAT access exception, the PSW still points to the faulting
1911 * instruction since DAT exceptions are nullifying. So we've got
1912 * to look up the current opcode to get the length of the instruction
1913 * to be able to forward the PSW.
1914 */
Alexander Yarygin8ae04b82015-01-19 13:24:51 +03001915 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
Thomas Huth492d8642015-02-10 16:11:01 +01001916 if (rc)
1917 return kvm_s390_inject_prog_cond(vcpu, rc);
1918 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1919
1920 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1921}
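/*
 * Background note (added for clarity): on s390 the instruction length is
 * encoded in the two leftmost bits of the first opcode byte:
 * 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. insn_length() decodes
 * this, and calling __rewind_psw() with a negative length moves the PSW
 * forward past the faulting instruction, so the addressing exception is
 * injected with a PSW that already points to the next instruction.
 */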
1922
Thomas Huth3fb4c402013-09-12 10:33:43 +02001923static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1924{
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001925 int rc = -1;
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02001926
1927 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1928 vcpu->arch.sie_block->icptcode);
1929 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1930
David Hildenbrand27291e22014-01-23 12:26:52 +01001931 if (guestdbg_enabled(vcpu))
1932 kvm_s390_restore_guest_per_regs(vcpu);
1933
Thomas Huth3fb4c402013-09-12 10:33:43 +02001934 if (exit_reason >= 0) {
Martin Schwidefsky7c470532013-05-17 14:41:37 +02001935 rc = 0;
Thomas Huth210b16072013-09-19 16:26:18 +02001936 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1937 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1938 vcpu->run->s390_ucontrol.trans_exc_code =
1939 current->thread.gmap_addr;
1940 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1941 rc = -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001942
1943 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02001944 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001945 current->thread.gmap_pfault = 0;
Thomas Huthfa576c52014-05-06 17:20:16 +02001946 if (kvm_arch_setup_async_pf(vcpu)) {
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001947 rc = 0;
Thomas Huthfa576c52014-05-06 17:20:16 +02001948 } else {
1949 gpa_t gpa = current->thread.gmap_addr;
1950 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1951 }
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001952 }
1953
Thomas Huth492d8642015-02-10 16:11:01 +01001954 if (rc == -1)
1955 rc = vcpu_post_run_fault_in_sie(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001956
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001957 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
Thomas Huth3fb4c402013-09-12 10:33:43 +02001958
Thomas Hutha76ccff2013-09-12 10:33:44 +02001959 if (rc == 0) {
1960 if (kvm_is_ucontrol(vcpu->kvm))
Christian Borntraeger2955c832014-03-06 16:01:38 +01001961 /* Don't exit for host interrupts. */
1962 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
Thomas Hutha76ccff2013-09-12 10:33:44 +02001963 else
1964 rc = kvm_handle_sie_intercept(vcpu);
1965 }
1966
Thomas Huth3fb4c402013-09-12 10:33:43 +02001967 return rc;
1968}
1969
1970static int __vcpu_run(struct kvm_vcpu *vcpu)
1971{
1972 int rc, exit_reason;
1973
Thomas Huth800c1062013-09-12 10:33:45 +02001974 /*
 1975 * We try to hold kvm->srcu during most of vcpu_run (except when
 1976 * running the guest), so that memslots (and other stuff) are protected
1977 */
1978 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1979
Thomas Hutha76ccff2013-09-12 10:33:44 +02001980 do {
1981 rc = vcpu_pre_run(vcpu);
1982 if (rc)
1983 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02001984
Thomas Huth800c1062013-09-12 10:33:45 +02001985 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02001986 /*
1987 * As PF_VCPU will be used in fault handler, between
1988 * guest_enter and guest_exit should be no uaccess.
1989 */
1990 preempt_disable();
1991 kvm_guest_enter();
1992 preempt_enable();
1993 exit_reason = sie64a(vcpu->arch.sie_block,
1994 vcpu->run->s.regs.gprs);
1995 kvm_guest_exit();
Thomas Huth800c1062013-09-12 10:33:45 +02001996 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02001997
Thomas Hutha76ccff2013-09-12 10:33:44 +02001998 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01001999 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002000
Thomas Huth800c1062013-09-12 10:33:45 +02002001 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002002 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002003}
2004
David Hildenbrandb028ee32014-07-17 10:47:43 +02002005static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2006{
2007 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2008 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2009 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2010 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2011 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2012 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002013 /* some control register changes require a tlb flush */
2014 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002015 }
2016 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2017 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2018 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2019 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2020 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2021 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2022 }
2023 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2024 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2025 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2026 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002027 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2028 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002029 }
2030 kvm_run->kvm_dirty_regs = 0;
2031}
2032
2033static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2034{
2035 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2036 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2037 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2038 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2039 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2040 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2041 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2042 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2043 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2044 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2045 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2046 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2047}
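/*
 * Illustrative userspace flow (assumed, not part of the original source):
 * registers covered by the sync-regs scheme can be changed without extra
 * ioctls by marking them dirty in the shared kvm_run area before KVM_RUN:
 *
 *	run->s.regs.prefix = new_prefix;	// new_prefix is an assumption
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * sync_regs() consumes the dirty bits on entry and store_regs() writes the
 * current state back after every exit.
 */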
2048
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002049int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2050{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002051 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002052 sigset_t sigsaved;
2053
David Hildenbrand27291e22014-01-23 12:26:52 +01002054 if (guestdbg_exit_pending(vcpu)) {
2055 kvm_s390_prepare_debug_exit(vcpu);
2056 return 0;
2057 }
2058
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002059 if (vcpu->sigset_active)
2060 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2061
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002062 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2063 kvm_s390_vcpu_start(vcpu);
2064 } else if (is_vcpu_stopped(vcpu)) {
2065 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
2066 vcpu->vcpu_id);
2067 return -EINVAL;
2068 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002069
David Hildenbrandb028ee32014-07-17 10:47:43 +02002070 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002071
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002072 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002073 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002074
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002075 if (signal_pending(current) && !rc) {
2076 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002077 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002078 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002079
David Hildenbrand27291e22014-01-23 12:26:52 +01002080 if (guestdbg_exit_pending(vcpu) && !rc) {
2081 kvm_s390_prepare_debug_exit(vcpu);
2082 rc = 0;
2083 }
2084
Heiko Carstensb8e660b2010-02-26 22:37:41 +01002085 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002086 /* intercept cannot be handled in-kernel, prepare kvm-run */
2087 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2088 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002089 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2090 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2091 rc = 0;
2092 }
2093
2094 if (rc == -EREMOTE) {
 2095 /* intercept was handled, but userspace support is needed;
 2096 * kvm_run has been prepared by the handler */
2097 rc = 0;
2098 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002099
David Hildenbrandb028ee32014-07-17 10:47:43 +02002100 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002101
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002102 if (vcpu->sigset_active)
2103 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2104
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002105 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002106 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002107}
2108
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002109/*
2110 * store status at address
 2111 * we have two special cases:
2112 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2113 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2114 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002115int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002116{
Carsten Otte092670c2011-07-24 10:48:22 +02002117 unsigned char archmode = 1;
Michael Muellerfda902c2014-05-13 16:58:30 +02002118 unsigned int px;
Thomas Huth178bd782013-11-13 20:28:18 +01002119 u64 clkcomp;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002120 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002121
Heiko Carstensd0bce602014-01-01 16:45:58 +01002122 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2123 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002124 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002125 gpa = SAVE_AREA_BASE;
2126 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2127 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002128 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002129 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2130 }
2131 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2132 vcpu->arch.guest_fpregs.fprs, 128);
2133 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2134 vcpu->run->s.regs.gprs, 128);
2135 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2136 &vcpu->arch.sie_block->gpsw, 16);
Michael Muellerfda902c2014-05-13 16:58:30 +02002137 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002138 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
Michael Muellerfda902c2014-05-13 16:58:30 +02002139 &px, 4);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002140 rc |= write_guest_abs(vcpu,
2141 gpa + offsetof(struct save_area, fp_ctrl_reg),
2142 &vcpu->arch.guest_fpregs.fpc, 4);
2143 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2144 &vcpu->arch.sie_block->todpr, 4);
2145 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2146 &vcpu->arch.sie_block->cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002147 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002148 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2149 &clkcomp, 8);
2150 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2151 &vcpu->run->s.regs.acrs, 64);
2152 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2153 &vcpu->arch.sie_block->gcr, 128);
2154 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002155}
2156
Thomas Huthe8798922013-11-06 15:46:33 +01002157int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2158{
2159 /*
2160 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 2161 * copying in vcpu load/put. Let's update our copies before we save
 2162 * them into the save area.
2163 */
2164 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2165 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2166 save_access_regs(vcpu->run->s.regs.acrs);
2167
2168 return kvm_s390_store_status_unloaded(vcpu, addr);
2169}
2170
Eric Farmanbc17de72014-04-14 16:01:09 -04002171/*
2172 * store additional status at address
2173 */
2174int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2175 unsigned long gpa)
2176{
2177 /* Only bits 0-53 are used for address formation */
2178 if (!(gpa & ~0x3ff))
2179 return 0;
2180
2181 return write_guest_abs(vcpu, gpa & ~0x3ff,
2182 (void *)&vcpu->run->s.regs.vrs, 512);
2183}
2184
2185int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2186{
2187 if (!test_kvm_facility(vcpu->kvm, 129))
2188 return 0;
2189
2190 /*
 2191 * The guest VXRS are in the host VXRS due to the lazy
 2192 * copying in vcpu load/put. Let's update our copies before we save
 2193 * them into the save area.
2194 */
2195 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2196
2197 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2198}
2199
David Hildenbrand8ad35752014-03-14 11:00:21 +01002200static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2201{
2202 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2203 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
2204 exit_sie_sync(vcpu);
2205}
2206
2207static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2208{
2209 unsigned int i;
2210 struct kvm_vcpu *vcpu;
2211
2212 kvm_for_each_vcpu(i, vcpu, kvm) {
2213 __disable_ibs_on_vcpu(vcpu);
2214 }
2215}
2216
2217static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2218{
2219 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2220 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2221 exit_sie_sync(vcpu);
2222}
2223
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002224void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2225{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002226 int i, online_vcpus, started_vcpus = 0;
2227
2228 if (!is_vcpu_stopped(vcpu))
2229 return;
2230
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002231 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002232 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002233 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002234 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2235
2236 for (i = 0; i < online_vcpus; i++) {
2237 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2238 started_vcpus++;
2239 }
2240
2241 if (started_vcpus == 0) {
2242 /* we're the only active VCPU -> speed it up */
2243 __enable_ibs_on_vcpu(vcpu);
2244 } else if (started_vcpus == 1) {
2245 /*
2246 * As we are starting a second VCPU, we have to disable
2247 * the IBS facility on all VCPUs to remove potentially
 2248 * outstanding ENABLE requests.
2249 */
2250 __disable_ibs_on_all_vcpus(vcpu->kvm);
2251 }
2252
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002253 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002254 /*
2255 * Another VCPU might have used IBS while we were offline.
2256 * Let's play safe and flush the VCPU at startup.
2257 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002258 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002259 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002260 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002261}
2262
2263void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2264{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002265 int i, online_vcpus, started_vcpus = 0;
2266 struct kvm_vcpu *started_vcpu = NULL;
2267
2268 if (is_vcpu_stopped(vcpu))
2269 return;
2270
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002271 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002272 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002273 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002274 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2275
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002276 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002277 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002278
David Hildenbrand6cddd432014-10-15 16:48:53 +02002279 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002280 __disable_ibs_on_vcpu(vcpu);
2281
2282 for (i = 0; i < online_vcpus; i++) {
2283 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2284 started_vcpus++;
2285 started_vcpu = vcpu->kvm->vcpus[i];
2286 }
2287 }
2288
2289 if (started_vcpus == 1) {
2290 /*
2291 * As we only have one VCPU left, we want to enable the
2292 * IBS facility for that VCPU to speed it up.
2293 */
2294 __enable_ibs_on_vcpu(started_vcpu);
2295 }
2296
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002297 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002298 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002299}
2300
Cornelia Huckd6712df2012-12-20 15:32:11 +01002301static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2302 struct kvm_enable_cap *cap)
2303{
2304 int r;
2305
2306 if (cap->flags)
2307 return -EINVAL;
2308
2309 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002310 case KVM_CAP_S390_CSS_SUPPORT:
2311 if (!vcpu->kvm->arch.css_support) {
2312 vcpu->kvm->arch.css_support = 1;
2313 trace_kvm_s390_enable_css(vcpu->kvm);
2314 }
2315 r = 0;
2316 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002317 default:
2318 r = -EINVAL;
2319 break;
2320 }
2321 return r;
2322}
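/*
 * Sketch (assumed vcpu_fd): enabling the CSS support capability from
 * userspace, after which channel I/O instructions are passed to userspace
 * for handling:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */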
2323
Thomas Huth41408c282015-02-06 15:01:21 +01002324static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2325 struct kvm_s390_mem_op *mop)
2326{
2327 void __user *uaddr = (void __user *)mop->buf;
2328 void *tmpbuf = NULL;
2329 int r, srcu_idx;
2330 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2331 | KVM_S390_MEMOP_F_CHECK_ONLY;
2332
2333 if (mop->flags & ~supported_flags)
2334 return -EINVAL;
2335
2336 if (mop->size > MEM_OP_MAX_SIZE)
2337 return -E2BIG;
2338
2339 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2340 tmpbuf = vmalloc(mop->size);
2341 if (!tmpbuf)
2342 return -ENOMEM;
2343 }
2344
2345 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2346
2347 switch (mop->op) {
2348 case KVM_S390_MEMOP_LOGICAL_READ:
2349 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2350 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2351 break;
2352 }
2353 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2354 if (r == 0) {
2355 if (copy_to_user(uaddr, tmpbuf, mop->size))
2356 r = -EFAULT;
2357 }
2358 break;
2359 case KVM_S390_MEMOP_LOGICAL_WRITE:
2360 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2361 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2362 break;
2363 }
2364 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2365 r = -EFAULT;
2366 break;
2367 }
2368 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2369 break;
2370 default:
2371 r = -EINVAL;
2372 }
2373
2374 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2375
2376 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2377 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2378
2379 vfree(tmpbuf);
2380 return r;
2381}
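/*
 * Userspace sketch for the memop interface above (vcpu_fd and guest_addr
 * are assumptions): read 256 bytes from a guest logical address:
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY in .flags only the accessibility of the
 * range is checked and no data is copied, as in the switch above.
 */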
2382
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002383long kvm_arch_vcpu_ioctl(struct file *filp,
2384 unsigned int ioctl, unsigned long arg)
2385{
2386 struct kvm_vcpu *vcpu = filp->private_data;
2387 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02002388 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03002389 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390
Avi Kivity93736622010-05-13 12:35:17 +03002391 switch (ioctl) {
2392 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002393 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02002394 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002395
Avi Kivity93736622010-05-13 12:35:17 +03002396 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002397 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03002398 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02002399 if (s390int_to_s390irq(&s390int, &s390irq))
2400 return -EINVAL;
2401 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03002402 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002403 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002404 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02002405 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002406 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02002407 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002408 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002409 case KVM_S390_SET_INITIAL_PSW: {
2410 psw_t psw;
2411
Avi Kivitybc923cc2010-05-13 12:21:46 +03002412 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002413 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03002414 break;
2415 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2416 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002417 }
2418 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03002419 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2420 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002421 case KVM_SET_ONE_REG:
2422 case KVM_GET_ONE_REG: {
2423 struct kvm_one_reg reg;
2424 r = -EFAULT;
2425 if (copy_from_user(&reg, argp, sizeof(reg)))
2426 break;
2427 if (ioctl == KVM_SET_ONE_REG)
2428 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2429 else
2430 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2431 break;
2432 }
Carsten Otte27e03932012-01-04 10:25:21 +01002433#ifdef CONFIG_KVM_S390_UCONTROL
2434 case KVM_S390_UCAS_MAP: {
2435 struct kvm_s390_ucas_mapping ucasmap;
2436
2437 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2438 r = -EFAULT;
2439 break;
2440 }
2441
2442 if (!kvm_is_ucontrol(vcpu->kvm)) {
2443 r = -EINVAL;
2444 break;
2445 }
2446
2447 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2448 ucasmap.vcpu_addr, ucasmap.length);
2449 break;
2450 }
2451 case KVM_S390_UCAS_UNMAP: {
2452 struct kvm_s390_ucas_mapping ucasmap;
2453
2454 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2455 r = -EFAULT;
2456 break;
2457 }
2458
2459 if (!kvm_is_ucontrol(vcpu->kvm)) {
2460 r = -EINVAL;
2461 break;
2462 }
2463
2464 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2465 ucasmap.length);
2466 break;
2467 }
2468#endif
Carsten Otteccc79102012-01-04 10:25:26 +01002469 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002470 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01002471 break;
2472 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01002473 case KVM_ENABLE_CAP:
2474 {
2475 struct kvm_enable_cap cap;
2476 r = -EFAULT;
2477 if (copy_from_user(&cap, argp, sizeof(cap)))
2478 break;
2479 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2480 break;
2481 }
Thomas Huth41408c282015-02-06 15:01:21 +01002482 case KVM_S390_MEM_OP: {
2483 struct kvm_s390_mem_op mem_op;
2484
2485 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2486 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2487 else
2488 r = -EFAULT;
2489 break;
2490 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002491 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01002492 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002493 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03002494 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002495}
2496
Carsten Otte5b1c1492012-01-04 10:25:23 +01002497int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2498{
2499#ifdef CONFIG_KVM_S390_UCONTROL
2500 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2501 && (kvm_is_ucontrol(vcpu->kvm))) {
2502 vmf->page = virt_to_page(vcpu->arch.sie_block);
2503 get_page(vmf->page);
2504 return 0;
2505 }
2506#endif
2507 return VM_FAULT_SIGBUS;
2508}
2509
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05302510int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2511 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09002512{
2513 return 0;
2514}
2515
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002517int kvm_arch_prepare_memory_region(struct kvm *kvm,
2518 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002519 struct kvm_userspace_memory_region *mem,
2520 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002521{
Nick Wangdd2887e2013-03-25 17:22:57 +01002522	/* A few sanity checks. Memory slots have to start and end on a
 2523 segment boundary (1 MB). The memory in userland may be fragmented
 2524 into various different vmas. It is okay to mmap() and munmap()
 2525 parts of this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002526
Carsten Otte598841c2011-07-24 10:48:21 +02002527 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002528 return -EINVAL;
2529
Carsten Otte598841c2011-07-24 10:48:21 +02002530 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002531 return -EINVAL;
2532
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002533 return 0;
2534}
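/*
 * Hedged example (vm_fd and backing are assumptions): a memslot that
 * passes the 1 MB alignment checks above:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,		// multiple of 1 MB
 *		.userspace_addr  = (__u64)backing,	// 1 MB aligned mapping
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */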
2535
2536void kvm_arch_commit_memory_region(struct kvm *kvm,
2537 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002538 const struct kvm_memory_slot *old,
2539 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002540{
Carsten Ottef7850c92011-07-24 10:48:23 +02002541 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002542
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002543 /* If the basics of the memslot do not change, we do not want
2544 * to update the gmap. Every update causes several unnecessary
2545 * segment translation exceptions. This is usually handled just
2546 * fine by the normal fault handler + gmap, but it will also
2547 * cause faults on the prefix page of running guest CPUs.
2548 */
2549 if (old->userspace_addr == mem->userspace_addr &&
2550 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2551 old->npages * PAGE_SIZE == mem->memory_size)
2552 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002553
2554 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2555 mem->guest_phys_addr, mem->memory_size);
2556 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02002557 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002558 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002559}
2560
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002561static int __init kvm_s390_init(void)
2562{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002563 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002564}
2565
2566static void __exit kvm_s390_exit(void)
2567{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002568 kvm_exit();
2569}
2570
2571module_init(kvm_s390_init);
2572module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002573
2574/*
2575 * Enable autoloading of the kvm module.
2576 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2577 * since x86 takes a different approach.
2578 */
2579#include <linux/miscdevice.h>
2580MODULE_ALIAS_MISCDEV(KVM_MINOR);
2581MODULE_ALIAS("devname:kvm");