blob: b2371c0fd1f8cbecf406599dea6966f8cedfc546 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010028#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010029#include <asm/lowcore.h>
30#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010031#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010032#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020033#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020034#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010035#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010036#include "gaccess.h"
37
Cornelia Huck5786fff2012-07-23 17:20:29 +020038#define CREATE_TRACE_POINTS
39#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020040#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020041
Heiko Carstensb0c632d2008-03-25 18:47:20 +010042#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
43
44struct kvm_stats_debugfs_item debugfs_entries[] = {
45 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020046 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010047 { "exit_validity", VCPU_STAT(exit_validity) },
48 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
49 { "exit_external_request", VCPU_STAT(exit_external_request) },
50 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010051 { "exit_instruction", VCPU_STAT(exit_instruction) },
52 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
53 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020054 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020055 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010056 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010057 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
58 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010059 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020060 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010061 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
62 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
63 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
64 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
65 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
66 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
67 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020068 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010069 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
70 { "instruction_spx", VCPU_STAT(instruction_spx) },
71 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
72 { "instruction_stap", VCPU_STAT(instruction_stap) },
73 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010074 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010075 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
76 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020077 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010078 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
79 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020080 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010081 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010082 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020083 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010084 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020085 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
86 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010087 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020088 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
89 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010090 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
91 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
92 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020093 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
94 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
95 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010096 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010097 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020098 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010099 { NULL }
100};
101
Michael Mueller78c4b592013-07-26 15:04:04 +0200102unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200103static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100104
Michael Mueller78c4b592013-07-26 15:04:04 +0200105/* test availability of vfacility */
Heiko Carstens280ef0f2013-12-17 09:08:28 +0100106int test_vfacility(unsigned long nr)
Michael Mueller78c4b592013-07-26 15:04:04 +0200107{
108 return __test_facility(nr, (void *) vfacilities);
109}
110
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100111/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200112int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100113{
114 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200115 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100116}
117
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200118static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
119
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100120int kvm_arch_hardware_setup(void)
121{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200122 gmap_notifier.notifier_call = kvm_gmap_notifier;
123 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100124 return 0;
125}
126
127void kvm_arch_hardware_unsetup(void)
128{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200129 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100130}
131
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100132int kvm_arch_init(void *opaque)
133{
Cornelia Huck84877d92014-09-02 10:27:35 +0100134 /* Register floating interrupt controller interface. */
135 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100136}
137
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100138/* Section: device related */
139long kvm_arch_dev_ioctl(struct file *filp,
140 unsigned int ioctl, unsigned long arg)
141{
142 if (ioctl == KVM_S390_ENABLE_SIE)
143 return s390_enable_sie();
144 return -EINVAL;
145}
146
Alexander Graf784aa3d2014-07-14 18:27:35 +0200147int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100148{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100149 int r;
150
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200151 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100152 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200153 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100154 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100155#ifdef CONFIG_KVM_S390_UCONTROL
156 case KVM_CAP_S390_UCONTROL:
157#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200158 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100159 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200160 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100161 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100162 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200163 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100164 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200165 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200166 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200167 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200168 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200169 case KVM_CAP_MP_STATE:
David Hildenbrand2444b352014-10-09 14:10:13 +0200170 case KVM_CAP_S390_USER_SIGP:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100171 r = 1;
172 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200173 case KVM_CAP_NR_VCPUS:
174 case KVM_CAP_MAX_VCPUS:
175 r = KVM_MAX_VCPUS;
176 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100177 case KVM_CAP_NR_MEMSLOTS:
178 r = KVM_USER_MEM_SLOTS;
179 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200180 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100181 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200182 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200183 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100184 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200185 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100186 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100187}
188
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400189static void kvm_s390_sync_dirty_log(struct kvm *kvm,
190 struct kvm_memory_slot *memslot)
191{
192 gfn_t cur_gfn, last_gfn;
193 unsigned long address;
194 struct gmap *gmap = kvm->arch.gmap;
195
196 down_read(&gmap->mm->mmap_sem);
197 /* Loop over all guest pages */
198 last_gfn = memslot->base_gfn + memslot->npages;
199 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
200 address = gfn_to_hva_memslot(memslot, cur_gfn);
201
202 if (gmap_test_and_clear_dirty(address, gmap))
203 mark_page_dirty(kvm, cur_gfn);
204 }
205 up_read(&gmap->mm->mmap_sem);
206}
207
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100208/* Section: vm related */
209/*
210 * Get (and clear) the dirty memory log for a memory slot.
211 */
212int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
213 struct kvm_dirty_log *log)
214{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400215 int r;
216 unsigned long n;
217 struct kvm_memory_slot *memslot;
218 int is_dirty = 0;
219
220 mutex_lock(&kvm->slots_lock);
221
222 r = -EINVAL;
223 if (log->slot >= KVM_USER_MEM_SLOTS)
224 goto out;
225
226 memslot = id_to_memslot(kvm->memslots, log->slot);
227 r = -ENOENT;
228 if (!memslot->dirty_bitmap)
229 goto out;
230
231 kvm_s390_sync_dirty_log(kvm, memslot);
232 r = kvm_get_dirty_log(kvm, log, &is_dirty);
233 if (r)
234 goto out;
235
236 /* Clear the dirty log */
237 if (is_dirty) {
238 n = kvm_dirty_bitmap_bytes(memslot);
239 memset(memslot->dirty_bitmap, 0, n);
240 }
241 r = 0;
242out:
243 mutex_unlock(&kvm->slots_lock);
244 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100245}
246
Cornelia Huckd938dc52013-10-23 18:26:34 +0200247static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
248{
249 int r;
250
251 if (cap->flags)
252 return -EINVAL;
253
254 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200255 case KVM_CAP_S390_IRQCHIP:
256 kvm->arch.use_irqchip = 1;
257 r = 0;
258 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200259 case KVM_CAP_S390_USER_SIGP:
260 kvm->arch.user_sigp = 1;
261 r = 0;
262 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200263 default:
264 r = -EINVAL;
265 break;
266 }
267 return r;
268}
269
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100270static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
271{
272 int ret;
273
274 switch (attr->attr) {
275 case KVM_S390_VM_MEM_LIMIT_SIZE:
276 ret = 0;
277 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
278 ret = -EFAULT;
279 break;
280 default:
281 ret = -ENXIO;
282 break;
283 }
284 return ret;
285}
286
287static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200288{
289 int ret;
290 unsigned int idx;
291 switch (attr->attr) {
292 case KVM_S390_VM_MEM_ENABLE_CMMA:
293 ret = -EBUSY;
294 mutex_lock(&kvm->lock);
295 if (atomic_read(&kvm->online_vcpus) == 0) {
296 kvm->arch.use_cmma = 1;
297 ret = 0;
298 }
299 mutex_unlock(&kvm->lock);
300 break;
301 case KVM_S390_VM_MEM_CLR_CMMA:
302 mutex_lock(&kvm->lock);
303 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200304 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200305 srcu_read_unlock(&kvm->srcu, idx);
306 mutex_unlock(&kvm->lock);
307 ret = 0;
308 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100309 case KVM_S390_VM_MEM_LIMIT_SIZE: {
310 unsigned long new_limit;
311
312 if (kvm_is_ucontrol(kvm))
313 return -EINVAL;
314
315 if (get_user(new_limit, (u64 __user *)attr->addr))
316 return -EFAULT;
317
318 if (new_limit > kvm->arch.gmap->asce_end)
319 return -E2BIG;
320
321 ret = -EBUSY;
322 mutex_lock(&kvm->lock);
323 if (atomic_read(&kvm->online_vcpus) == 0) {
324 /* gmap_alloc will round the limit up */
325 struct gmap *new = gmap_alloc(current->mm, new_limit);
326
327 if (!new) {
328 ret = -ENOMEM;
329 } else {
330 gmap_free(kvm->arch.gmap);
331 new->private = kvm;
332 kvm->arch.gmap = new;
333 ret = 0;
334 }
335 }
336 mutex_unlock(&kvm->lock);
337 break;
338 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200339 default:
340 ret = -ENXIO;
341 break;
342 }
343 return ret;
344}
345
Tony Krowiaka374e892014-09-03 10:13:53 +0200346static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
347
348static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
349{
350 struct kvm_vcpu *vcpu;
351 int i;
352
353 if (!test_vfacility(76))
354 return -EINVAL;
355
356 mutex_lock(&kvm->lock);
357 switch (attr->attr) {
358 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
359 get_random_bytes(
360 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
361 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
362 kvm->arch.crypto.aes_kw = 1;
363 break;
364 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
365 get_random_bytes(
366 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
367 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
368 kvm->arch.crypto.dea_kw = 1;
369 break;
370 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
371 kvm->arch.crypto.aes_kw = 0;
372 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
373 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
374 break;
375 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
376 kvm->arch.crypto.dea_kw = 0;
377 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
378 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
379 break;
380 default:
381 mutex_unlock(&kvm->lock);
382 return -ENXIO;
383 }
384
385 kvm_for_each_vcpu(i, vcpu, kvm) {
386 kvm_s390_vcpu_crypto_setup(vcpu);
387 exit_sie(vcpu);
388 }
389 mutex_unlock(&kvm->lock);
390 return 0;
391}
392
Jason J. Herne72f25022014-11-25 09:46:02 -0500393static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
394{
395 u8 gtod_high;
396
397 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
398 sizeof(gtod_high)))
399 return -EFAULT;
400
401 if (gtod_high != 0)
402 return -EINVAL;
403
404 return 0;
405}
406
407static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
408{
409 struct kvm_vcpu *cur_vcpu;
410 unsigned int vcpu_idx;
411 u64 host_tod, gtod;
412 int r;
413
414 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
415 return -EFAULT;
416
417 r = store_tod_clock(&host_tod);
418 if (r)
419 return r;
420
421 mutex_lock(&kvm->lock);
422 kvm->arch.epoch = gtod - host_tod;
423 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
424 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
425 exit_sie(cur_vcpu);
426 }
427 mutex_unlock(&kvm->lock);
428 return 0;
429}
430
431static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
432{
433 int ret;
434
435 if (attr->flags)
436 return -EINVAL;
437
438 switch (attr->attr) {
439 case KVM_S390_VM_TOD_HIGH:
440 ret = kvm_s390_set_tod_high(kvm, attr);
441 break;
442 case KVM_S390_VM_TOD_LOW:
443 ret = kvm_s390_set_tod_low(kvm, attr);
444 break;
445 default:
446 ret = -ENXIO;
447 break;
448 }
449 return ret;
450}
451
452static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
453{
454 u8 gtod_high = 0;
455
456 if (copy_to_user((void __user *)attr->addr, &gtod_high,
457 sizeof(gtod_high)))
458 return -EFAULT;
459
460 return 0;
461}
462
463static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
464{
465 u64 host_tod, gtod;
466 int r;
467
468 r = store_tod_clock(&host_tod);
469 if (r)
470 return r;
471
472 gtod = host_tod + kvm->arch.epoch;
473 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
474 return -EFAULT;
475
476 return 0;
477}
478
479static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
480{
481 int ret;
482
483 if (attr->flags)
484 return -EINVAL;
485
486 switch (attr->attr) {
487 case KVM_S390_VM_TOD_HIGH:
488 ret = kvm_s390_get_tod_high(kvm, attr);
489 break;
490 case KVM_S390_VM_TOD_LOW:
491 ret = kvm_s390_get_tod_low(kvm, attr);
492 break;
493 default:
494 ret = -ENXIO;
495 break;
496 }
497 return ret;
498}
499
Dominik Dingelf2061652014-04-09 13:13:00 +0200500static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
501{
502 int ret;
503
504 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200505 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100506 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200507 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500508 case KVM_S390_VM_TOD:
509 ret = kvm_s390_set_tod(kvm, attr);
510 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200511 case KVM_S390_VM_CRYPTO:
512 ret = kvm_s390_vm_set_crypto(kvm, attr);
513 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200514 default:
515 ret = -ENXIO;
516 break;
517 }
518
519 return ret;
520}
521
522static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
523{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100524 int ret;
525
526 switch (attr->group) {
527 case KVM_S390_VM_MEM_CTRL:
528 ret = kvm_s390_get_mem_control(kvm, attr);
529 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500530 case KVM_S390_VM_TOD:
531 ret = kvm_s390_get_tod(kvm, attr);
532 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100533 default:
534 ret = -ENXIO;
535 break;
536 }
537
538 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200539}
540
541static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
542{
543 int ret;
544
545 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200546 case KVM_S390_VM_MEM_CTRL:
547 switch (attr->attr) {
548 case KVM_S390_VM_MEM_ENABLE_CMMA:
549 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100550 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200551 ret = 0;
552 break;
553 default:
554 ret = -ENXIO;
555 break;
556 }
557 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500558 case KVM_S390_VM_TOD:
559 switch (attr->attr) {
560 case KVM_S390_VM_TOD_LOW:
561 case KVM_S390_VM_TOD_HIGH:
562 ret = 0;
563 break;
564 default:
565 ret = -ENXIO;
566 break;
567 }
568 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200569 case KVM_S390_VM_CRYPTO:
570 switch (attr->attr) {
571 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
572 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
573 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
574 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
575 ret = 0;
576 break;
577 default:
578 ret = -ENXIO;
579 break;
580 }
581 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200582 default:
583 ret = -ENXIO;
584 break;
585 }
586
587 return ret;
588}
589
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100590long kvm_arch_vm_ioctl(struct file *filp,
591 unsigned int ioctl, unsigned long arg)
592{
593 struct kvm *kvm = filp->private_data;
594 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200595 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100596 int r;
597
598 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100599 case KVM_S390_INTERRUPT: {
600 struct kvm_s390_interrupt s390int;
601
602 r = -EFAULT;
603 if (copy_from_user(&s390int, argp, sizeof(s390int)))
604 break;
605 r = kvm_s390_inject_vm(kvm, &s390int);
606 break;
607 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200608 case KVM_ENABLE_CAP: {
609 struct kvm_enable_cap cap;
610 r = -EFAULT;
611 if (copy_from_user(&cap, argp, sizeof(cap)))
612 break;
613 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
614 break;
615 }
Cornelia Huck84223592013-07-15 13:36:01 +0200616 case KVM_CREATE_IRQCHIP: {
617 struct kvm_irq_routing_entry routing;
618
619 r = -EINVAL;
620 if (kvm->arch.use_irqchip) {
621 /* Set up dummy routing. */
622 memset(&routing, 0, sizeof(routing));
623 kvm_set_irq_routing(kvm, &routing, 0, 0);
624 r = 0;
625 }
626 break;
627 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200628 case KVM_SET_DEVICE_ATTR: {
629 r = -EFAULT;
630 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
631 break;
632 r = kvm_s390_vm_set_attr(kvm, &attr);
633 break;
634 }
635 case KVM_GET_DEVICE_ATTR: {
636 r = -EFAULT;
637 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
638 break;
639 r = kvm_s390_vm_get_attr(kvm, &attr);
640 break;
641 }
642 case KVM_HAS_DEVICE_ATTR: {
643 r = -EFAULT;
644 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
645 break;
646 r = kvm_s390_vm_has_attr(kvm, &attr);
647 break;
648 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100649 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300650 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100651 }
652
653 return r;
654}
655
Tony Krowiak5102ee82014-06-27 14:46:01 -0400656static int kvm_s390_crypto_init(struct kvm *kvm)
657{
658 if (!test_vfacility(76))
659 return 0;
660
661 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
662 GFP_KERNEL | GFP_DMA);
663 if (!kvm->arch.crypto.crycb)
664 return -ENOMEM;
665
666 kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
667 CRYCB_FORMAT1;
668
Tony Krowiaka374e892014-09-03 10:13:53 +0200669 /* Disable AES/DEA protected key functions by default */
670 kvm->arch.crypto.aes_kw = 0;
671 kvm->arch.crypto.dea_kw = 0;
672
Tony Krowiak5102ee82014-06-27 14:46:01 -0400673 return 0;
674}
675
Carsten Ottee08b9632012-01-04 10:25:20 +0100676int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100677{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100678 int rc;
679 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100680 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100681
Carsten Ottee08b9632012-01-04 10:25:20 +0100682 rc = -EINVAL;
683#ifdef CONFIG_KVM_S390_UCONTROL
684 if (type & ~KVM_VM_S390_UCONTROL)
685 goto out_err;
686 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
687 goto out_err;
688#else
689 if (type)
690 goto out_err;
691#endif
692
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100693 rc = s390_enable_sie();
694 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100695 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100696
Carsten Otteb2904112011-10-18 12:27:13 +0200697 rc = -ENOMEM;
698
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100699 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
700 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100701 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100702 spin_lock(&kvm_lock);
703 sca_offset = (sca_offset + 16) & 0x7f0;
704 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
705 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100706
707 sprintf(debug_name, "kvm-%u", current->pid);
708
709 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
710 if (!kvm->arch.dbf)
711 goto out_nodbf;
712
Tony Krowiak5102ee82014-06-27 14:46:01 -0400713 if (kvm_s390_crypto_init(kvm) < 0)
714 goto out_crypto;
715
Carsten Otteba5c1e92008-03-25 18:47:26 +0100716 spin_lock_init(&kvm->arch.float_int.lock);
717 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100718 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +0200719 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100720
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100721 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
722 VM_EVENT(kvm, 3, "%s", "vm created");
723
Carsten Ottee08b9632012-01-04 10:25:20 +0100724 if (type & KVM_VM_S390_UCONTROL) {
725 kvm->arch.gmap = NULL;
726 } else {
Christian Borntraeger03499852014-08-25 12:38:57 +0200727 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +0100728 if (!kvm->arch.gmap)
729 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200730 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200731 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100732 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100733
734 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200735 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -0500736 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100737
David Hildenbrand8ad35752014-03-14 11:00:21 +0100738 spin_lock_init(&kvm->arch.start_stop_lock);
739
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100740 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200741out_nogmap:
Tony Krowiak5102ee82014-06-27 14:46:01 -0400742 kfree(kvm->arch.crypto.crycb);
743out_crypto:
Carsten Otte598841c2011-07-24 10:48:21 +0200744 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100745out_nodbf:
746 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100747out_err:
748 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100749}
750
Christian Borntraegerd329c032008-11-26 14:50:27 +0100751void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
752{
753 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200754 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100755 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200756 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100757 if (!kvm_is_ucontrol(vcpu->kvm)) {
758 clear_bit(63 - vcpu->vcpu_id,
759 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
760 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
761 (__u64) vcpu->arch.sie_block)
762 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
763 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200764 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100765
766 if (kvm_is_ucontrol(vcpu->kvm))
767 gmap_free(vcpu->arch.gmap);
768
Dominik Dingelb31605c2014-03-25 13:47:11 +0100769 if (kvm_s390_cmma_enabled(vcpu->kvm))
770 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100771 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200772
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100773 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200774 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100775}
776
777static void kvm_free_vcpus(struct kvm *kvm)
778{
779 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300780 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100781
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300782 kvm_for_each_vcpu(i, vcpu, kvm)
783 kvm_arch_vcpu_destroy(vcpu);
784
785 mutex_lock(&kvm->lock);
786 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
787 kvm->vcpus[i] = NULL;
788
789 atomic_set(&kvm->online_vcpus, 0);
790 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100791}
792
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100793void kvm_arch_destroy_vm(struct kvm *kvm)
794{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100795 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100796 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100797 debug_unregister(kvm->arch.dbf);
Tony Krowiak5102ee82014-06-27 14:46:01 -0400798 kfree(kvm->arch.crypto.crycb);
Carsten Otte27e03932012-01-04 10:25:21 +0100799 if (!kvm_is_ucontrol(kvm))
800 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200801 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100802 kvm_s390_clear_float_irqs(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100803}
804
805/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +0100806static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
807{
808 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
809 if (!vcpu->arch.gmap)
810 return -ENOMEM;
811 vcpu->arch.gmap->private = vcpu->kvm;
812
813 return 0;
814}
815
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100816int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
817{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200818 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
819 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100820 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
821 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100822 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +0200823 KVM_SYNC_CRS |
824 KVM_SYNC_ARCH0 |
825 KVM_SYNC_PFAULT;
Dominik Dingeldafd0322014-12-02 16:53:21 +0100826
827 if (kvm_is_ucontrol(vcpu->kvm))
828 return __kvm_ucontrol_vcpu_init(vcpu);
829
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100830 return 0;
831}
832
/*
 * Called when this vcpu is scheduled in on a host cpu: stash the host
 * FP and access registers, install the guest copies, switch to the
 * guest address space and mark the vcpu as running. The save/restore
 * order (host first, then guest) must not change.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
844
/*
 * Called when this vcpu is scheduled out: the exact mirror of
 * kvm_arch_vcpu_load() — clear the running flag, leave the guest
 * address space, save the guest FP/access registers and restore the
 * host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
856
/*
 * Bring the vcpu into its architected initial CPU reset state.
 * PSW, prefix, timers and control registers are reset; gcr[0]/gcr[14]
 * get their architected reset values. Pending async pfaults and local
 * interrupts are discarded as well.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the cleared FPC into the hardware register as well */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the vcpu if user space does not control the cpu state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
879
/*
 * Late vcpu creation step: copy the VM-wide TOD epoch under kvm->lock
 * and, for regular (non-ucontrol) VMs, attach the vcpu to the shared
 * guest address space.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
888
Tony Krowiak5102ee82014-06-27 14:46:01 -0400889static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
890{
891 if (!test_vfacility(76))
892 return;
893
Tony Krowiaka374e892014-09-03 10:13:53 +0200894 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
895
896 if (vcpu->kvm->arch.crypto.aes_kw)
897 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
898 if (vcpu->kvm->arch.crypto.dea_kw)
899 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
900
Tony Krowiak5102ee82014-06-27 14:46:01 -0400901 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
902}
903
Dominik Dingelb31605c2014-03-25 13:47:11 +0100904void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
905{
906 free_page(vcpu->arch.sie_block->cbrlo);
907 vcpu->arch.sie_block->cbrlo = 0;
908}
909
910int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
911{
912 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
913 if (!vcpu->arch.sie_block->cbrlo)
914 return -ENOMEM;
915
916 vcpu->arch.sie_block->ecb2 |= 0x80;
917 vcpu->arch.sie_block->ecb2 &= ~0x08;
918 return 0;
919}
920
/*
 * Program the SIE control block for a new vcpu: cpuflags, execution
 * controls (ecb/ecb2/eca), facility list, interception controls, CMMA
 * buffer, the clock-comparator wakeup timer and the reported CPU id.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* transactional execution only with facilities 50 and 73 */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* intercept key and protection instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer used to wake the vcpu when its clock comparator expires */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
957
958struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
959 unsigned int id)
960{
Carsten Otte4d475552011-10-18 12:27:12 +0200961 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200962 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200963 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100964
Carsten Otte4d475552011-10-18 12:27:12 +0200965 if (id >= KVM_MAX_VCPUS)
966 goto out;
967
968 rc = -ENOMEM;
969
Michael Muellerb110fea2013-06-12 13:54:54 +0200970 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100971 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200972 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100973
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200974 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
975 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100976 goto out_free_cpu;
977
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200978 vcpu->arch.sie_block = &sie_page->sie_block;
979 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
980
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100981 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100982 if (!kvm_is_ucontrol(kvm)) {
983 if (!kvm->arch.sca) {
984 WARN_ON_ONCE(1);
985 goto out_free_cpu;
986 }
987 if (!kvm->arch.sca->cpu[id].sda)
988 kvm->arch.sca->cpu[id].sda =
989 (__u64) vcpu->arch.sie_block;
990 vcpu->arch.sie_block->scaoh =
991 (__u32)(((__u64)kvm->arch.sca) >> 32);
992 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
993 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
994 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100995
Carsten Otteba5c1e92008-03-25 18:47:26 +0100996 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100997 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200998 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100999 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001000
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001001 rc = kvm_vcpu_init(vcpu, kvm, id);
1002 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001003 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001004 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1005 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001006 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001007
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001008 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001009out_free_sie_block:
1010 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001011out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001012 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001013out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001014 return ERR_PTR(rc);
1015}
1016
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1021
/* Prevent the vcpu from (re-)entering SIE by setting the block bit. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1026
/* Allow the vcpu to enter SIE again; counterpart of s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1031
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the SIE "in use" flag is dropped by hardware */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1042
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1049
/*
 * gmap invalidation callback: if the unmapped address covers a vcpu's
 * prefix area (two pages), request an MMU reload for that vcpu and
 * force it out of SIE so the reload takes effect.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
1065
/*
 * s390 kicks vcpus via exit_sie()/requests instead of IPIs, so this
 * generic hook must never be reached.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1072
/*
 * KVM_GET_ONE_REG: copy a single s390 register out to user space.
 * Returns 0 on success, -EFAULT from put_user, or -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1121
/*
 * KVM_SET_ONE_REG: copy a single s390 register in from user space;
 * the inverse of kvm_arch_vcpu_ioctl_get_one_reg(). Setting an
 * invalid pfault token also drops any queued async-pf completions.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001172
/* ioctl wrapper around the architected initial CPU reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1178
/* KVM_SET_REGS: copy all 16 general purpose registers from user space. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1184
/* KVM_GET_REGS: copy all 16 general purpose registers to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1190
/*
 * KVM_SET_SREGS: take over access and control registers; the access
 * registers are loaded into hardware immediately since this vcpu may
 * currently be running on this host cpu.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1199
/* KVM_GET_SREGS: hand the access and control registers to user space. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1207
/*
 * KVM_SET_FPU: validate and install the guest floating point state.
 * The FPC is checked first; the registers are then loaded into
 * hardware right away.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1218
/* KVM_GET_FPU: hand the guest floating point state to user space. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1225
1226static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1227{
1228 int rc = 0;
1229
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001230 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001231 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001232 else {
1233 vcpu->run->psw_mask = psw.mask;
1234 vcpu->run->psw_addr = psw.addr;
1235 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001236 return rc;
1237}
1238
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1244
/* guest-debug control flags accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)
1248
/*
 * KVM_SET_GUEST_DEBUG: enable/disable guest debugging. Existing
 * breakpoint data is always dropped first; on enable the guest is
 * forced into PER mode and hardware breakpoints are imported. On any
 * failure the debug state is rolled back completely.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* roll back: leave the vcpu with debugging fully disabled */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1280
/* KVM_GET_MP_STATE: report the vcpu as either stopped or operating. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
1288
/*
 * KVM_SET_MP_STATE: stop or start the vcpu as requested. Using this
 * ioctl at all transfers cpu-state control to user space for the whole
 * VM. Unsupported states yield -ENXIO.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1313
Dominik Dingelb31605c2014-03-25 13:47:11 +01001314bool kvm_s390_cmma_enabled(struct kvm *kvm)
1315{
1316 if (!MACHINE_IS_LPAR)
1317 return false;
1318 /* only enable for z10 and later */
1319 if (!MACHINE_HAS_EDAT1)
1320 return false;
1321 if (!kvm->arch.use_cmma)
1322 return false;
1323 return true;
1324}
1325
/* Is the interruption-blocking-state flag set for this vcpu? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1330
/*
 * Process pending vcpu requests before (re-)entering SIE. Each handled
 * request restarts the loop from the top (goto retry) because handling
 * one request may race with new requests being posted.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the cached host cpu number to force a flush */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1380
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	/* FAULT_FLAG_WRITE requests a writable mapping from the gmap */
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1396
/*
 * Inject an async page fault notification into the guest: INIT tokens
 * go to the vcpu as a local interrupt, DONE tokens are queued as a
 * floating interrupt on the VM.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1413
/* async-pf hook: tell the guest the page is not yet present (INIT). */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1420
/* async-pf hook: tell the guest the page fault is resolved (DONE). */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1427
/* async-pf hook: nothing to do here on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1433
/* async-pf hook: always report "can inject" so cleanup still runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1442
/*
 * Try to arm an async page fault for the address the guest just
 * faulted on. Returns 0 when async handling is not possible or not
 * wanted (caller then falls back to a synchronous fault-in); otherwise
 * the result of kvm_setup_async_pf().
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* PSW mask must match the guest-configured pfault selection */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* read the guest's 8-byte pfault token; bail out if unreadable */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1471
/*
 * Everything that must happen before (re-)entering SIE: async-pf
 * housekeeping, syncing gprs 14/15 into the SIE block, scheduling,
 * machine check handling, interrupt delivery, pending requests and
 * guest-debug PER setup.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* copy gprs 14 and 15 into the SIE-visible save area */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1513
/*
 * Everything that must happen after SIE returned. exit_reason >= 0
 * means a regular exit; negative values signal a host-side fault:
 * ucontrol VMs get the fault reported to user space, regular VMs
 * resolve guest page faults (async if possible, synchronous
 * otherwise). rc stays -1 only for unresolvable faults, which are
 * turned into an addressing exception for the guest.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* copy gprs 14 and 15 back from the SIE save area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1563
/*
 * Inner run loop: alternate between in-kernel preparation and running the
 * guest via the SIE instruction until userspace attention is required
 * (pending signal, guest-debug event, or a non-zero rc from pre/post run).
 *
 * Returns 0 when the caller merely has to hand an exit to userspace,
 * a negative error code otherwise.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* srcu must not be held across guest execution */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		/* enter the guest; returns the SIE intercept/exit reason */
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1598
David Hildenbrandb028ee32014-07-17 10:47:43 +02001599static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1600{
1601 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1602 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1603 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1604 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1605 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1606 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001607 /* some control register changes require a tlb flush */
1608 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001609 }
1610 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1611 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1612 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1613 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1614 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1615 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1616 }
1617 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1618 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1619 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1620 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001621 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1622 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001623 }
1624 kvm_run->kvm_dirty_regs = 0;
1625}
1626
1627static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1628{
1629 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1630 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1631 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1632 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1633 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1634 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1635 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1636 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1637 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1638 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1639 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1640 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1641}
1642
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001643int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1644{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001645 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001646 sigset_t sigsaved;
1647
David Hildenbrand27291e22014-01-23 12:26:52 +01001648 if (guestdbg_exit_pending(vcpu)) {
1649 kvm_s390_prepare_debug_exit(vcpu);
1650 return 0;
1651 }
1652
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001653 if (vcpu->sigset_active)
1654 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1655
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001656 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1657 kvm_s390_vcpu_start(vcpu);
1658 } else if (is_vcpu_stopped(vcpu)) {
1659 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1660 vcpu->vcpu_id);
1661 return -EINVAL;
1662 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001663
David Hildenbrandb028ee32014-07-17 10:47:43 +02001664 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001665
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001666 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001667 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001668
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001669 if (signal_pending(current) && !rc) {
1670 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001671 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001672 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001673
David Hildenbrand27291e22014-01-23 12:26:52 +01001674 if (guestdbg_exit_pending(vcpu) && !rc) {
1675 kvm_s390_prepare_debug_exit(vcpu);
1676 rc = 0;
1677 }
1678
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001679 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001680 /* intercept cannot be handled in-kernel, prepare kvm-run */
1681 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1682 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001683 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1684 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1685 rc = 0;
1686 }
1687
1688 if (rc == -EREMOTE) {
1689 /* intercept was handled, but userspace support is needed
1690 * kvm_run has been prepared by the handler */
1691 rc = 0;
1692 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001693
David Hildenbrandb028ee32014-07-17 10:47:43 +02001694 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001695
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001696 if (vcpu->sigset_active)
1697 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1698
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001699 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001700 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001701}
1702
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01001709int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001710{
Carsten Otte092670c2011-07-24 10:48:22 +02001711 unsigned char archmode = 1;
Michael Muellerfda902c2014-05-13 16:58:30 +02001712 unsigned int px;
Thomas Huth178bd782013-11-13 20:28:18 +01001713 u64 clkcomp;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001714 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001715
Heiko Carstensd0bce602014-01-01 16:45:58 +01001716 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1717 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001718 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001719 gpa = SAVE_AREA_BASE;
1720 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1721 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001722 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001723 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1724 }
1725 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1726 vcpu->arch.guest_fpregs.fprs, 128);
1727 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1728 vcpu->run->s.regs.gprs, 128);
1729 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1730 &vcpu->arch.sie_block->gpsw, 16);
Michael Muellerfda902c2014-05-13 16:58:30 +02001731 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01001732 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
Michael Muellerfda902c2014-05-13 16:58:30 +02001733 &px, 4);
Heiko Carstensd0bce602014-01-01 16:45:58 +01001734 rc |= write_guest_abs(vcpu,
1735 gpa + offsetof(struct save_area, fp_ctrl_reg),
1736 &vcpu->arch.guest_fpregs.fpc, 4);
1737 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1738 &vcpu->arch.sie_block->todpr, 4);
1739 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1740 &vcpu->arch.sie_block->cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01001741 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001742 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1743 &clkcomp, 8);
1744 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1745 &vcpu->run->s.regs.acrs, 64);
1746 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1747 &vcpu->arch.sie_block->gcr, 128);
1748 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001749}
1750
/*
 * Store the CPU status for a loaded (currently scheduled-in) VCPU.
 * Pulls the lazily-held FP and access registers out of the hardware
 * into the VCPU structure, then delegates the actual guest-memory
 * write to kvm_s390_store_status_unloaded().
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1764
/* Ask one VCPU to leave IBS mode and synchronously kick it out of SIE. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* clear a possibly pending ENABLE request before queueing DISABLE */
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1771
1772static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1773{
1774 unsigned int i;
1775 struct kvm_vcpu *vcpu;
1776
1777 kvm_for_each_vcpu(i, vcpu, kvm) {
1778 __disable_ibs_on_vcpu(vcpu);
1779 }
1780}
1781
/* Ask one VCPU to enter IBS mode and synchronously kick it out of SIE. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* clear a possibly pending DISABLE request before queueing ENABLE */
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1788
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001789void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1790{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001791 int i, online_vcpus, started_vcpus = 0;
1792
1793 if (!is_vcpu_stopped(vcpu))
1794 return;
1795
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001796 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001797 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001798 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001799 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1800
1801 for (i = 0; i < online_vcpus; i++) {
1802 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1803 started_vcpus++;
1804 }
1805
1806 if (started_vcpus == 0) {
1807 /* we're the only active VCPU -> speed it up */
1808 __enable_ibs_on_vcpu(vcpu);
1809 } else if (started_vcpus == 1) {
1810 /*
1811 * As we are starting a second VCPU, we have to disable
1812 * the IBS facility on all VCPUs to remove potentially
1813 * oustanding ENABLE requests.
1814 */
1815 __disable_ibs_on_all_vcpus(vcpu->kvm);
1816 }
1817
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001818 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001819 /*
1820 * Another VCPU might have used IBS while we were offline.
1821 * Let's play safe and flush the VCPU at startup.
1822 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001823 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001824 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001825 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001826}
1827
1828void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1829{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001830 int i, online_vcpus, started_vcpus = 0;
1831 struct kvm_vcpu *started_vcpu = NULL;
1832
1833 if (is_vcpu_stopped(vcpu))
1834 return;
1835
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001836 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001837 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001838 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001839 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1840
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001841 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02001842 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001843
David Hildenbrand6cddd432014-10-15 16:48:53 +02001844 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001845 __disable_ibs_on_vcpu(vcpu);
1846
1847 for (i = 0; i < online_vcpus; i++) {
1848 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1849 started_vcpus++;
1850 started_vcpu = vcpu->kvm->vcpus[i];
1851 }
1852 }
1853
1854 if (started_vcpus == 1) {
1855 /*
1856 * As we only have one VCPU left, we want to enable the
1857 * IBS facility for that VCPU to speed it up.
1858 */
1859 __enable_ibs_on_vcpu(started_vcpu);
1860 }
1861
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001862 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001863 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001864}
1865
Cornelia Huckd6712df2012-12-20 15:32:11 +01001866static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1867 struct kvm_enable_cap *cap)
1868{
1869 int r;
1870
1871 if (cap->flags)
1872 return -EINVAL;
1873
1874 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001875 case KVM_CAP_S390_CSS_SUPPORT:
1876 if (!vcpu->kvm->arch.css_support) {
1877 vcpu->kvm->arch.css_support = 1;
1878 trace_kvm_s390_enable_css(vcpu->kvm);
1879 }
1880 r = 0;
1881 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001882 default:
1883 r = -EINVAL;
1884 break;
1885 }
1886 return r;
1887}
1888
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001889long kvm_arch_vcpu_ioctl(struct file *filp,
1890 unsigned int ioctl, unsigned long arg)
1891{
1892 struct kvm_vcpu *vcpu = filp->private_data;
1893 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02001894 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03001895 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001896
Avi Kivity93736622010-05-13 12:35:17 +03001897 switch (ioctl) {
1898 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001899 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001900 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001901
Avi Kivity93736622010-05-13 12:35:17 +03001902 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001903 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03001904 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02001905 if (s390int_to_s390irq(&s390int, &s390irq))
1906 return -EINVAL;
1907 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03001908 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001909 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001910 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02001911 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001912 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02001913 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001914 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001915 case KVM_S390_SET_INITIAL_PSW: {
1916 psw_t psw;
1917
Avi Kivitybc923cc2010-05-13 12:21:46 +03001918 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001919 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03001920 break;
1921 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1922 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001923 }
1924 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03001925 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1926 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001927 case KVM_SET_ONE_REG:
1928 case KVM_GET_ONE_REG: {
1929 struct kvm_one_reg reg;
1930 r = -EFAULT;
1931 if (copy_from_user(&reg, argp, sizeof(reg)))
1932 break;
1933 if (ioctl == KVM_SET_ONE_REG)
1934 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1935 else
1936 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1937 break;
1938 }
Carsten Otte27e03932012-01-04 10:25:21 +01001939#ifdef CONFIG_KVM_S390_UCONTROL
1940 case KVM_S390_UCAS_MAP: {
1941 struct kvm_s390_ucas_mapping ucasmap;
1942
1943 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1944 r = -EFAULT;
1945 break;
1946 }
1947
1948 if (!kvm_is_ucontrol(vcpu->kvm)) {
1949 r = -EINVAL;
1950 break;
1951 }
1952
1953 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1954 ucasmap.vcpu_addr, ucasmap.length);
1955 break;
1956 }
1957 case KVM_S390_UCAS_UNMAP: {
1958 struct kvm_s390_ucas_mapping ucasmap;
1959
1960 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1961 r = -EFAULT;
1962 break;
1963 }
1964
1965 if (!kvm_is_ucontrol(vcpu->kvm)) {
1966 r = -EINVAL;
1967 break;
1968 }
1969
1970 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1971 ucasmap.length);
1972 break;
1973 }
1974#endif
Carsten Otteccc79102012-01-04 10:25:26 +01001975 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001976 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01001977 break;
1978 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01001979 case KVM_ENABLE_CAP:
1980 {
1981 struct kvm_enable_cap cap;
1982 r = -EFAULT;
1983 if (copy_from_user(&cap, argp, sizeof(cap)))
1984 break;
1985 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1986 break;
1987 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001988 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01001989 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001990 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03001991 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001992}
1993
Carsten Otte5b1c1492012-01-04 10:25:23 +01001994int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1995{
1996#ifdef CONFIG_KVM_S390_UCONTROL
1997 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1998 && (kvm_is_ucontrol(vcpu->kvm))) {
1999 vmf->page = virt_to_page(vcpu->arch.sie_block);
2000 get_page(vmf->page);
2001 return 0;
2002 }
2003#endif
2004 return VM_FAULT_SIGBUS;
2005}
2006
/* s390 keeps no arch-private per-memslot data, so there is nothing to
 * allocate here. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2012
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002013/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002014int kvm_arch_prepare_memory_region(struct kvm *kvm,
2015 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002016 struct kvm_userspace_memory_region *mem,
2017 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002018{
Nick Wangdd2887e2013-03-25 17:22:57 +01002019 /* A few sanity checks. We can have memory slots which have to be
2020 located/ended at a segment boundary (1MB). The memory in userland is
2021 ok to be fragmented into various different vmas. It is okay to mmap()
2022 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002023
Carsten Otte598841c2011-07-24 10:48:21 +02002024 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002025 return -EINVAL;
2026
Carsten Otte598841c2011-07-24 10:48:21 +02002027 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002028 return -EINVAL;
2029
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002030 return 0;
2031}
2032
2033void kvm_arch_commit_memory_region(struct kvm *kvm,
2034 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002035 const struct kvm_memory_slot *old,
2036 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002037{
Carsten Ottef7850c92011-07-24 10:48:23 +02002038 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002039
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002040 /* If the basics of the memslot do not change, we do not want
2041 * to update the gmap. Every update causes several unnecessary
2042 * segment translation exceptions. This is usually handled just
2043 * fine by the normal fault handler + gmap, but it will also
2044 * cause faults on the prefix page of running guest CPUs.
2045 */
2046 if (old->userspace_addr == mem->userspace_addr &&
2047 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2048 old->npages * PAGE_SIZE == mem->memory_size)
2049 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002050
2051 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2052 mem->guest_phys_addr, mem->memory_size);
2053 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02002054 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002055 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002056}
2057
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002058static int __init kvm_s390_init(void)
2059{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002060 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03002061 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002062 if (ret)
2063 return ret;
2064
2065 /*
2066 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002067 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002068 * only set facilities that are known to work in KVM.
2069 */
Michael Mueller78c4b592013-07-26 15:04:04 +02002070 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
2071 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002072 kvm_exit();
2073 return -ENOMEM;
2074 }
Michael Mueller78c4b592013-07-26 15:04:04 +02002075 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Christian Borntraeger7be81a42014-09-19 15:55:20 +02002076 vfacilities[0] &= 0xff82fffbf47c2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002077 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002078 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002079}
2080
/* Module exit: free the facility page and unregister from generic KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
2086
2087module_init(kvm_s390_init);
2088module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002089
2090/*
2091 * Enable autoloading of the kvm module.
2092 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2093 * since x86 takes a different approach.
2094 */
2095#include <linux/miscdevice.h>
2096MODULE_ALIAS_MISCDEV(KVM_MINOR);
2097MODULE_ALIAS("devname:kvm");