blob: 397b88dc1b2c8d427d9f482ed51c3a34f91c6f5f [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010029#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010030#include <asm/lowcore.h>
Fan Zhangfdf03652015-05-13 10:58:41 +020031#include <asm/etr.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010032#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010033#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010034#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020035#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020036#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010037#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010038#include "gaccess.h"
39
David Hildenbrandea2cdd22015-05-20 13:24:02 +020040#define KMSG_COMPONENT "kvm-s390"
41#undef pr_fmt
42#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
Cornelia Huck5786fff2012-07-23 17:20:29 +020044#define CREATE_TRACE_POINTS
45#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020046#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020047
Thomas Huth41408c282015-02-06 15:01:21 +010048#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010049#define LOCAL_IRQS 32
50#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010052
Heiko Carstensb0c632d2008-03-25 18:47:20 +010053#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
55struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020057 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010058 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010062 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Paolo Bonzinif7819512015-02-04 18:20:58 +010065 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020066 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020067 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010068 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010069 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
70 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010071 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020072 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010073 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
74 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
75 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
76 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
77 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
78 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
79 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020080 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010081 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
82 { "instruction_spx", VCPU_STAT(instruction_spx) },
83 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
84 { "instruction_stap", VCPU_STAT(instruction_stap) },
85 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010086 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010087 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
88 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020089 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010090 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
91 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020092 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010093 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010094 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020095 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010096 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020097 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
98 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010099 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200100 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
101 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Eric Farmancd7b4b62015-02-12 09:06:34 -0500102 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100103 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
104 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
105 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200106 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
107 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
108 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger388186b2011-10-30 15:17:03 +0100109 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +0100110 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +0200111 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Christian Borntraeger175a5c92015-07-07 15:19:32 +0200112 { "diagnose_258", VCPU_STAT(diagnose_258) },
113 { "diagnose_308", VCPU_STAT(diagnose_308) },
114 { "diagnose_500", VCPU_STAT(diagnose_500) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115 { NULL }
116};
117
Michael Mueller9d8d5782015-02-02 15:42:51 +0100118/* upper facilities limit for kvm */
119unsigned long kvm_s390_fac_list_mask[] = {
Christian Borntraegera3ed8da2015-03-18 13:54:31 +0100120 0xffe6fffbfcfdfc40UL,
Guenther Hutzl53df84f2015-02-18 11:13:03 +0100121 0x005e800000000000UL,
Michael Mueller9d8d5782015-02-02 15:42:51 +0100122};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100123
Michael Mueller9d8d5782015-02-02 15:42:51 +0100124unsigned long kvm_s390_fac_list_mask_size(void)
Michael Mueller78c4b592013-07-26 15:04:04 +0200125{
Michael Mueller9d8d5782015-02-02 15:42:51 +0100126 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
127 return ARRAY_SIZE(kvm_s390_fac_list_mask);
Michael Mueller78c4b592013-07-26 15:04:04 +0200128}
129
Michael Mueller9d8d5782015-02-02 15:42:51 +0100130static struct gmap_notifier gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200131debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100132
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100133/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200134int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100135{
136 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200137 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100138}
139
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200140static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
141
Fan Zhangfdf03652015-05-13 10:58:41 +0200142/*
143 * This callback is executed during stop_machine(). All CPUs are therefore
144 * temporarily stopped. In order not to change guest behavior, we have to
145 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
146 * so a CPU won't be stopped while calculating with the epoch.
147 */
148static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
149 void *v)
150{
151 struct kvm *kvm;
152 struct kvm_vcpu *vcpu;
153 int i;
154 unsigned long long *delta = v;
155
156 list_for_each_entry(kvm, &vm_list, vm_list) {
157 kvm->arch.epoch -= *delta;
158 kvm_for_each_vcpu(i, vcpu, kvm) {
159 vcpu->arch.sie_block->epoch -= *delta;
160 }
161 }
162 return NOTIFY_OK;
163}
164
165static struct notifier_block kvm_clock_notifier = {
166 .notifier_call = kvm_clock_sync,
167};
168
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100169int kvm_arch_hardware_setup(void)
170{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200171 gmap_notifier.notifier_call = kvm_gmap_notifier;
172 gmap_register_ipte_notifier(&gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200173 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
174 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100175 return 0;
176}
177
178void kvm_arch_hardware_unsetup(void)
179{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200180 gmap_unregister_ipte_notifier(&gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200181 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
182 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100183}
184
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100185int kvm_arch_init(void *opaque)
186{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200187 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
188 if (!kvm_s390_dbf)
189 return -ENOMEM;
190
191 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
192 debug_unregister(kvm_s390_dbf);
193 return -ENOMEM;
194 }
195
Cornelia Huck84877d92014-09-02 10:27:35 +0100196 /* Register floating interrupt controller interface. */
197 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100198}
199
Christian Borntraeger78f26132015-07-22 15:50:58 +0200200void kvm_arch_exit(void)
201{
202 debug_unregister(kvm_s390_dbf);
203}
204
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100205/* Section: device related */
206long kvm_arch_dev_ioctl(struct file *filp,
207 unsigned int ioctl, unsigned long arg)
208{
209 if (ioctl == KVM_S390_ENABLE_SIE)
210 return s390_enable_sie();
211 return -EINVAL;
212}
213
Alexander Graf784aa3d2014-07-14 18:27:35 +0200214int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100215{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100216 int r;
217
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200218 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100219 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200220 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100221 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100222#ifdef CONFIG_KVM_S390_UCONTROL
223 case KVM_CAP_S390_UCONTROL:
224#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200225 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100226 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200227 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100228 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100229 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100230 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200231 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200232 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200233 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200234 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200235 case KVM_CAP_MP_STATE:
Jens Freimann47b43c52014-11-11 20:57:06 +0100236 case KVM_CAP_S390_INJECT_IRQ:
David Hildenbrand2444b352014-10-09 14:10:13 +0200237 case KVM_CAP_S390_USER_SIGP:
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100238 case KVM_CAP_S390_USER_STSI:
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400239 case KVM_CAP_S390_SKEYS:
Jens Freimann816c7662014-11-24 17:13:46 +0100240 case KVM_CAP_S390_IRQ_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100241 r = 1;
242 break;
Thomas Huth41408c282015-02-06 15:01:21 +0100243 case KVM_CAP_S390_MEM_OP:
244 r = MEM_OP_MAX_SIZE;
245 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200246 case KVM_CAP_NR_VCPUS:
247 case KVM_CAP_MAX_VCPUS:
248 r = KVM_MAX_VCPUS;
249 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100250 case KVM_CAP_NR_MEMSLOTS:
251 r = KVM_USER_MEM_SLOTS;
252 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200253 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100254 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200255 break;
Eric Farman68c55752014-06-09 10:57:26 -0400256 case KVM_CAP_S390_VECTOR_REGISTERS:
257 r = MACHINE_HAS_VX;
258 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200259 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100260 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200261 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100262 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100263}
264
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400265static void kvm_s390_sync_dirty_log(struct kvm *kvm,
266 struct kvm_memory_slot *memslot)
267{
268 gfn_t cur_gfn, last_gfn;
269 unsigned long address;
270 struct gmap *gmap = kvm->arch.gmap;
271
272 down_read(&gmap->mm->mmap_sem);
273 /* Loop over all guest pages */
274 last_gfn = memslot->base_gfn + memslot->npages;
275 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
276 address = gfn_to_hva_memslot(memslot, cur_gfn);
277
278 if (gmap_test_and_clear_dirty(address, gmap))
279 mark_page_dirty(kvm, cur_gfn);
280 }
281 up_read(&gmap->mm->mmap_sem);
282}
283
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100284/* Section: vm related */
285/*
286 * Get (and clear) the dirty memory log for a memory slot.
287 */
288int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
289 struct kvm_dirty_log *log)
290{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400291 int r;
292 unsigned long n;
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200293 struct kvm_memslots *slots;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400294 struct kvm_memory_slot *memslot;
295 int is_dirty = 0;
296
297 mutex_lock(&kvm->slots_lock);
298
299 r = -EINVAL;
300 if (log->slot >= KVM_USER_MEM_SLOTS)
301 goto out;
302
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200303 slots = kvm_memslots(kvm);
304 memslot = id_to_memslot(slots, log->slot);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400305 r = -ENOENT;
306 if (!memslot->dirty_bitmap)
307 goto out;
308
309 kvm_s390_sync_dirty_log(kvm, memslot);
310 r = kvm_get_dirty_log(kvm, log, &is_dirty);
311 if (r)
312 goto out;
313
314 /* Clear the dirty log */
315 if (is_dirty) {
316 n = kvm_dirty_bitmap_bytes(memslot);
317 memset(memslot->dirty_bitmap, 0, n);
318 }
319 r = 0;
320out:
321 mutex_unlock(&kvm->slots_lock);
322 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100323}
324
Cornelia Huckd938dc52013-10-23 18:26:34 +0200325static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
326{
327 int r;
328
329 if (cap->flags)
330 return -EINVAL;
331
332 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200333 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200334 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200335 kvm->arch.use_irqchip = 1;
336 r = 0;
337 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200338 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200339 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200340 kvm->arch.user_sigp = 1;
341 r = 0;
342 break;
Eric Farman68c55752014-06-09 10:57:26 -0400343 case KVM_CAP_S390_VECTOR_REGISTERS:
Michael Mueller18280d82015-03-16 16:05:41 +0100344 if (MACHINE_HAS_VX) {
345 set_kvm_facility(kvm->arch.model.fac->mask, 129);
346 set_kvm_facility(kvm->arch.model.fac->list, 129);
347 r = 0;
348 } else
349 r = -EINVAL;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200350 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
351 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400352 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100353 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200354 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100355 kvm->arch.user_stsi = 1;
356 r = 0;
357 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200358 default:
359 r = -EINVAL;
360 break;
361 }
362 return r;
363}
364
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100365static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
366{
367 int ret;
368
369 switch (attr->attr) {
370 case KVM_S390_VM_MEM_LIMIT_SIZE:
371 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200372 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
373 kvm->arch.gmap->asce_end);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100374 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
375 ret = -EFAULT;
376 break;
377 default:
378 ret = -ENXIO;
379 break;
380 }
381 return ret;
382}
383
384static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200385{
386 int ret;
387 unsigned int idx;
388 switch (attr->attr) {
389 case KVM_S390_VM_MEM_ENABLE_CMMA:
Dominik Dingele6db1d62015-05-07 15:41:57 +0200390 /* enable CMMA only for z10 and later (EDAT_1) */
391 ret = -EINVAL;
392 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
393 break;
394
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200395 ret = -EBUSY;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200396 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200397 mutex_lock(&kvm->lock);
398 if (atomic_read(&kvm->online_vcpus) == 0) {
399 kvm->arch.use_cmma = 1;
400 ret = 0;
401 }
402 mutex_unlock(&kvm->lock);
403 break;
404 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingelc3489152015-06-18 13:17:11 +0200405 ret = -EINVAL;
406 if (!kvm->arch.use_cmma)
407 break;
408
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200409 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200410 mutex_lock(&kvm->lock);
411 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200412 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200413 srcu_read_unlock(&kvm->srcu, idx);
414 mutex_unlock(&kvm->lock);
415 ret = 0;
416 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100417 case KVM_S390_VM_MEM_LIMIT_SIZE: {
418 unsigned long new_limit;
419
420 if (kvm_is_ucontrol(kvm))
421 return -EINVAL;
422
423 if (get_user(new_limit, (u64 __user *)attr->addr))
424 return -EFAULT;
425
426 if (new_limit > kvm->arch.gmap->asce_end)
427 return -E2BIG;
428
429 ret = -EBUSY;
430 mutex_lock(&kvm->lock);
431 if (atomic_read(&kvm->online_vcpus) == 0) {
432 /* gmap_alloc will round the limit up */
433 struct gmap *new = gmap_alloc(current->mm, new_limit);
434
435 if (!new) {
436 ret = -ENOMEM;
437 } else {
438 gmap_free(kvm->arch.gmap);
439 new->private = kvm;
440 kvm->arch.gmap = new;
441 ret = 0;
442 }
443 }
444 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200445 VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100446 break;
447 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200448 default:
449 ret = -ENXIO;
450 break;
451 }
452 return ret;
453}
454
Tony Krowiaka374e892014-09-03 10:13:53 +0200455static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
456
457static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
458{
459 struct kvm_vcpu *vcpu;
460 int i;
461
Michael Mueller9d8d5782015-02-02 15:42:51 +0100462 if (!test_kvm_facility(kvm, 76))
Tony Krowiaka374e892014-09-03 10:13:53 +0200463 return -EINVAL;
464
465 mutex_lock(&kvm->lock);
466 switch (attr->attr) {
467 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
468 get_random_bytes(
469 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
470 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
471 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200472 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200473 break;
474 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
475 get_random_bytes(
476 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
477 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
478 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200479 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200480 break;
481 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
482 kvm->arch.crypto.aes_kw = 0;
483 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
484 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200485 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200486 break;
487 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
488 kvm->arch.crypto.dea_kw = 0;
489 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
490 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200491 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200492 break;
493 default:
494 mutex_unlock(&kvm->lock);
495 return -ENXIO;
496 }
497
498 kvm_for_each_vcpu(i, vcpu, kvm) {
499 kvm_s390_vcpu_crypto_setup(vcpu);
500 exit_sie(vcpu);
501 }
502 mutex_unlock(&kvm->lock);
503 return 0;
504}
505
Jason J. Herne72f25022014-11-25 09:46:02 -0500506static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
507{
508 u8 gtod_high;
509
510 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
511 sizeof(gtod_high)))
512 return -EFAULT;
513
514 if (gtod_high != 0)
515 return -EINVAL;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200516 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500517
518 return 0;
519}
520
521static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
522{
523 struct kvm_vcpu *cur_vcpu;
524 unsigned int vcpu_idx;
525 u64 host_tod, gtod;
526 int r;
527
528 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
529 return -EFAULT;
530
531 r = store_tod_clock(&host_tod);
532 if (r)
533 return r;
534
535 mutex_lock(&kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +0200536 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500537 kvm->arch.epoch = gtod - host_tod;
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200538 kvm_s390_vcpu_block_all(kvm);
539 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
Jason J. Herne72f25022014-11-25 09:46:02 -0500540 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200541 kvm_s390_vcpu_unblock_all(kvm);
Fan Zhangfdf03652015-05-13 10:58:41 +0200542 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500543 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200544 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500545 return 0;
546}
547
548static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
549{
550 int ret;
551
552 if (attr->flags)
553 return -EINVAL;
554
555 switch (attr->attr) {
556 case KVM_S390_VM_TOD_HIGH:
557 ret = kvm_s390_set_tod_high(kvm, attr);
558 break;
559 case KVM_S390_VM_TOD_LOW:
560 ret = kvm_s390_set_tod_low(kvm, attr);
561 break;
562 default:
563 ret = -ENXIO;
564 break;
565 }
566 return ret;
567}
568
569static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
570{
571 u8 gtod_high = 0;
572
573 if (copy_to_user((void __user *)attr->addr, &gtod_high,
574 sizeof(gtod_high)))
575 return -EFAULT;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200576 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500577
578 return 0;
579}
580
581static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
582{
583 u64 host_tod, gtod;
584 int r;
585
586 r = store_tod_clock(&host_tod);
587 if (r)
588 return r;
589
Fan Zhangfdf03652015-05-13 10:58:41 +0200590 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500591 gtod = host_tod + kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +0200592 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500593 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
594 return -EFAULT;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200595 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500596
597 return 0;
598}
599
600static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
601{
602 int ret;
603
604 if (attr->flags)
605 return -EINVAL;
606
607 switch (attr->attr) {
608 case KVM_S390_VM_TOD_HIGH:
609 ret = kvm_s390_get_tod_high(kvm, attr);
610 break;
611 case KVM_S390_VM_TOD_LOW:
612 ret = kvm_s390_get_tod_low(kvm, attr);
613 break;
614 default:
615 ret = -ENXIO;
616 break;
617 }
618 return ret;
619}
620
Michael Mueller658b6ed2015-02-02 15:49:35 +0100621static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
622{
623 struct kvm_s390_vm_cpu_processor *proc;
624 int ret = 0;
625
626 mutex_lock(&kvm->lock);
627 if (atomic_read(&kvm->online_vcpus)) {
628 ret = -EBUSY;
629 goto out;
630 }
631 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
632 if (!proc) {
633 ret = -ENOMEM;
634 goto out;
635 }
636 if (!copy_from_user(proc, (void __user *)attr->addr,
637 sizeof(*proc))) {
638 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
639 sizeof(struct cpuid));
640 kvm->arch.model.ibc = proc->ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100641 memcpy(kvm->arch.model.fac->list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +0100642 S390_ARCH_FAC_LIST_SIZE_BYTE);
643 } else
644 ret = -EFAULT;
645 kfree(proc);
646out:
647 mutex_unlock(&kvm->lock);
648 return ret;
649}
650
651static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
652{
653 int ret = -ENXIO;
654
655 switch (attr->attr) {
656 case KVM_S390_VM_CPU_PROCESSOR:
657 ret = kvm_s390_set_processor(kvm, attr);
658 break;
659 }
660 return ret;
661}
662
663static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
664{
665 struct kvm_s390_vm_cpu_processor *proc;
666 int ret = 0;
667
668 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
669 if (!proc) {
670 ret = -ENOMEM;
671 goto out;
672 }
673 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
674 proc->ibc = kvm->arch.model.ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100675 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100676 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
677 ret = -EFAULT;
678 kfree(proc);
679out:
680 return ret;
681}
682
683static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
684{
685 struct kvm_s390_vm_cpu_machine *mach;
686 int ret = 0;
687
688 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
689 if (!mach) {
690 ret = -ENOMEM;
691 goto out;
692 }
693 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +0200694 mach->ibc = sclp.ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100695 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
696 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100697 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +0100698 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100699 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
700 ret = -EFAULT;
701 kfree(mach);
702out:
703 return ret;
704}
705
706static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
707{
708 int ret = -ENXIO;
709
710 switch (attr->attr) {
711 case KVM_S390_VM_CPU_PROCESSOR:
712 ret = kvm_s390_get_processor(kvm, attr);
713 break;
714 case KVM_S390_VM_CPU_MACHINE:
715 ret = kvm_s390_get_machine(kvm, attr);
716 break;
717 }
718 return ret;
719}
720
Dominik Dingelf2061652014-04-09 13:13:00 +0200721static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
722{
723 int ret;
724
725 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200726 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100727 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200728 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500729 case KVM_S390_VM_TOD:
730 ret = kvm_s390_set_tod(kvm, attr);
731 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100732 case KVM_S390_VM_CPU_MODEL:
733 ret = kvm_s390_set_cpu_model(kvm, attr);
734 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200735 case KVM_S390_VM_CRYPTO:
736 ret = kvm_s390_vm_set_crypto(kvm, attr);
737 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200738 default:
739 ret = -ENXIO;
740 break;
741 }
742
743 return ret;
744}
745
746static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
747{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100748 int ret;
749
750 switch (attr->group) {
751 case KVM_S390_VM_MEM_CTRL:
752 ret = kvm_s390_get_mem_control(kvm, attr);
753 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500754 case KVM_S390_VM_TOD:
755 ret = kvm_s390_get_tod(kvm, attr);
756 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100757 case KVM_S390_VM_CPU_MODEL:
758 ret = kvm_s390_get_cpu_model(kvm, attr);
759 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100760 default:
761 ret = -ENXIO;
762 break;
763 }
764
765 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200766}
767
768static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
769{
770 int ret;
771
772 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200773 case KVM_S390_VM_MEM_CTRL:
774 switch (attr->attr) {
775 case KVM_S390_VM_MEM_ENABLE_CMMA:
776 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100777 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200778 ret = 0;
779 break;
780 default:
781 ret = -ENXIO;
782 break;
783 }
784 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500785 case KVM_S390_VM_TOD:
786 switch (attr->attr) {
787 case KVM_S390_VM_TOD_LOW:
788 case KVM_S390_VM_TOD_HIGH:
789 ret = 0;
790 break;
791 default:
792 ret = -ENXIO;
793 break;
794 }
795 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100796 case KVM_S390_VM_CPU_MODEL:
797 switch (attr->attr) {
798 case KVM_S390_VM_CPU_PROCESSOR:
799 case KVM_S390_VM_CPU_MACHINE:
800 ret = 0;
801 break;
802 default:
803 ret = -ENXIO;
804 break;
805 }
806 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200807 case KVM_S390_VM_CRYPTO:
808 switch (attr->attr) {
809 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
810 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
811 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
812 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
813 ret = 0;
814 break;
815 default:
816 ret = -ENXIO;
817 break;
818 }
819 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200820 default:
821 ret = -ENXIO;
822 break;
823 }
824
825 return ret;
826}
827
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400828static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
829{
830 uint8_t *keys;
831 uint64_t hva;
832 unsigned long curkey;
833 int i, r = 0;
834
835 if (args->flags != 0)
836 return -EINVAL;
837
838 /* Is this guest using storage keys? */
839 if (!mm_use_skey(current->mm))
840 return KVM_S390_GET_SKEYS_NONE;
841
842 /* Enforce sane limit on memory allocation */
843 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
844 return -EINVAL;
845
846 keys = kmalloc_array(args->count, sizeof(uint8_t),
847 GFP_KERNEL | __GFP_NOWARN);
848 if (!keys)
849 keys = vmalloc(sizeof(uint8_t) * args->count);
850 if (!keys)
851 return -ENOMEM;
852
853 for (i = 0; i < args->count; i++) {
854 hva = gfn_to_hva(kvm, args->start_gfn + i);
855 if (kvm_is_error_hva(hva)) {
856 r = -EFAULT;
857 goto out;
858 }
859
860 curkey = get_guest_storage_key(current->mm, hva);
861 if (IS_ERR_VALUE(curkey)) {
862 r = curkey;
863 goto out;
864 }
865 keys[i] = curkey;
866 }
867
868 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
869 sizeof(uint8_t) * args->count);
870 if (r)
871 r = -EFAULT;
872out:
873 kvfree(keys);
874 return r;
875}
876
877static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
878{
879 uint8_t *keys;
880 uint64_t hva;
881 int i, r = 0;
882
883 if (args->flags != 0)
884 return -EINVAL;
885
886 /* Enforce sane limit on memory allocation */
887 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
888 return -EINVAL;
889
890 keys = kmalloc_array(args->count, sizeof(uint8_t),
891 GFP_KERNEL | __GFP_NOWARN);
892 if (!keys)
893 keys = vmalloc(sizeof(uint8_t) * args->count);
894 if (!keys)
895 return -ENOMEM;
896
897 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
898 sizeof(uint8_t) * args->count);
899 if (r) {
900 r = -EFAULT;
901 goto out;
902 }
903
904 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +0200905 r = s390_enable_skey();
906 if (r)
907 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400908
909 for (i = 0; i < args->count; i++) {
910 hva = gfn_to_hva(kvm, args->start_gfn + i);
911 if (kvm_is_error_hva(hva)) {
912 r = -EFAULT;
913 goto out;
914 }
915
916 /* Lowest order bit is reserved */
917 if (keys[i] & 0x01) {
918 r = -EINVAL;
919 goto out;
920 }
921
922 r = set_guest_storage_key(current->mm, hva,
923 (unsigned long)keys[i], 0);
924 if (r)
925 goto out;
926 }
927out:
928 kvfree(keys);
929 return r;
930}
931
/*
 * Architecture-specific VM ioctl handler for s390.
 *
 * Each case copies its argument structure from user space (failing with
 * -EFAULT on a bad copy) and forwards to the matching helper; unknown
 * ioctls yield -ENOTTY so the generic code can report "not supported".
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* inject a floating (VM-wide) interrupt */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		/*
		 * Only valid once the flic-style irqchip was enabled;
		 * the routing table itself is a dummy on s390.
		 */
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
1016
/*
 * Issue PQAP(QCI) to query the Adjunct Processor configuration into the
 * caller-supplied buffer (expected to be 128 bytes; zeroed first).
 *
 * fcn_code 0x04 in register 0 selects the QCI subfunction; register 2
 * holds the buffer address.  The EX_TABLE entry makes a program check
 * from the instruction (e.g. PQAP not available) resume at label 1,
 * leaving cc at its initial 0.
 *
 * Returns the condition code of the instruction (0 on success).
 * NOTE(review): caller passes a 128-byte buffer; the size is an implicit
 * contract with kvm_s390_apxa_installed() — confirm against the QCI spec.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
1038
1039static int kvm_s390_apxa_installed(void)
1040{
1041 u8 config[128];
1042 int cc;
1043
1044 if (test_facility(2) && test_facility(12)) {
1045 cc = kvm_s390_query_ap_config(config);
1046
1047 if (cc)
1048 pr_err("PQAP(QCI) failed with cc=%d", cc);
1049 else
1050 return config[0] & 0x40;
1051 }
1052
1053 return 0;
1054}
1055
1056static void kvm_s390_set_crycb_format(struct kvm *kvm)
1057{
1058 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1059
1060 if (kvm_s390_apxa_installed())
1061 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1062 else
1063 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1064}
1065
/*
 * Fetch the host CPU id and overwrite the version byte with 0xff,
 * which is the value presented to guests.
 */
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
1071
Tony Krowiak5102ee82014-06-27 14:46:01 -04001072static int kvm_s390_crypto_init(struct kvm *kvm)
1073{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001074 if (!test_kvm_facility(kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001075 return 0;
1076
1077 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1078 GFP_KERNEL | GFP_DMA);
1079 if (!kvm->arch.crypto.crycb)
1080 return -ENOMEM;
1081
Tony Krowiak45c9b472015-01-13 11:33:26 -05001082 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001083
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001084 /* Enable AES/DEA protected key functions by default */
1085 kvm->arch.crypto.aes_kw = 1;
1086 kvm->arch.crypto.dea_kw = 1;
1087 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1088 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1089 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1090 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiaka374e892014-09-03 10:13:53 +02001091
Tony Krowiak5102ee82014-06-27 14:46:01 -04001092 return 0;
1093}
1094
/*
 * Architecture-specific VM creation for s390.
 *
 * Validates the requested VM type, enables SIE for the host mm, and
 * allocates/initializes the per-VM state: SCA, debug feature, facility
 * mask/list page, crypto block, interrupt bookkeeping and (for non-
 * ucontrol VMs) the guest address space (gmap).
 *
 * Returns 0 on success or a negative error code; the shared out_err
 * path tears down whatever was already allocated (all the free/unregister
 * calls below tolerate NULL / not-yet-initialized fields).
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol flag is valid, and it needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger the SCA origin within its page from VM to VM (16-byte
	 * steps, wrapping at 0x7f0); sca_offset is static, so it is
	 * bumped under kvm_lock.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially: host facilities ANDed with
	 * the KVM-supported mask; everything beyond the mask is cleared. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* user-controlled VMs manage the address space themselves */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
1199
/*
 * Tear down a vcpu: drop pending local interrupts and async page faults,
 * detach the vcpu's SIE block from the SCA, then release the SIE page
 * and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this vcpu's bit in the SCA mask and its sda entry,
		 * but only if the entry still points at our SIE block */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before freeing the SIE block */
	smp_mb();

	/* ucontrol vcpus own their gmap (see __kvm_ucontrol_vcpu_init) */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
1225
1226static void kvm_free_vcpus(struct kvm *kvm)
1227{
1228 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001229 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001230
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001231 kvm_for_each_vcpu(i, vcpu, kvm)
1232 kvm_arch_vcpu_destroy(vcpu);
1233
1234 mutex_lock(&kvm->lock);
1235 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1236 kvm->vcpus[i] = NULL;
1237
1238 atomic_set(&kvm->online_vcpus, 0);
1239 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001240}
1241
/*
 * Architecture-specific VM destruction: free the vcpus first, then the
 * per-VM resources allocated in kvm_arch_init_vm (facility page, SCA,
 * debug feature, crycb, gmap) and the adapter/floating-interrupt state.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	/* ucontrol VMs never got a kvm-owned gmap */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
1255
1256/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001257static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1258{
1259 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1260 if (!vcpu->arch.gmap)
1261 return -ENOMEM;
1262 vcpu->arch.gmap->private = vcpu->kvm;
1263
1264 return 0;
1265}
1266
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001267int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1268{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001269 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1270 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001271 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1272 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001273 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001274 KVM_SYNC_CRS |
1275 KVM_SYNC_ARCH0 |
1276 KVM_SYNC_PFAULT;
Eric Farman68c55752014-06-09 10:57:26 -04001277 if (test_kvm_facility(vcpu->kvm, 129))
1278 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001279
1280 if (kvm_is_ucontrol(vcpu->kvm))
1281 return __kvm_ucontrol_vcpu_init(vcpu);
1282
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001283 return 0;
1284}
1285
/*
 * Called when the vcpu is scheduled onto a host cpu: save the host's
 * FP/vector and access registers, load the guest's, enable the guest
 * address space and mark the vcpu RUNNING.
 *
 * With facility 129 (vector facility) the full vector register file is
 * saved/restored; otherwise only the classic FP registers.  Order
 * matters: host state must be saved before guest state is loaded.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/* guest FP/VX state lives in the synced kvm_run area */
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
1305
/*
 * Called when the vcpu is scheduled off a host cpu: the mirror of
 * kvm_arch_vcpu_load.  Clear RUNNING, disable the guest address space,
 * save the guest's FP/vector and access registers, then restore the
 * host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/* guest FP/VX state lives in the synced kvm_run area */
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1325
/*
 * Apply the architected initial-cpu-reset state to a vcpu (PSW, prefix,
 * timers, control registers, FP control, breaking-event address), drop
 * pending async page faults and local interrupts, and stop the vcpu
 * unless user space manages cpu run states itself.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for cr0 and cr14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1348
/*
 * Finish vcpu creation after the generic code has published it: copy
 * the VM-wide TOD epoch into the SIE block (under the kvm lock, with
 * preemption disabled so the value is taken consistently) and, for
 * non-ucontrol VMs, attach the shared VM gmap.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
1359
Tony Krowiak5102ee82014-06-27 14:46:01 -04001360static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1361{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001362 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001363 return;
1364
Tony Krowiaka374e892014-09-03 10:13:53 +02001365 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1366
1367 if (vcpu->kvm->arch.crypto.aes_kw)
1368 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1369 if (vcpu->kvm->arch.crypto.dea_kw)
1370 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1371
Tony Krowiak5102ee82014-06-27 14:46:01 -04001372 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1373}
1374
Dominik Dingelb31605c2014-03-25 13:47:11 +01001375void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1376{
1377 free_page(vcpu->arch.sie_block->cbrlo);
1378 vcpu->arch.sie_block->cbrlo = 0;
1379}
1380
1381int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1382{
1383 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1384 if (!vcpu->arch.sie_block->cbrlo)
1385 return -ENOMEM;
1386
1387 vcpu->arch.sie_block->ecb2 |= 0x80;
1388 vcpu->arch.sie_block->ecb2 &= ~0x08;
1389 return 0;
1390}
1391
Michael Mueller91520f12015-02-27 14:32:11 +01001392static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1393{
1394 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1395
1396 vcpu->arch.cpu_id = model->cpu_id;
1397 vcpu->arch.sie_block->ibc = model->ibc;
1398 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1399}
1400
/*
 * Configure the vcpu's SIE control block: initial cpuflags, the CPU
 * model, execution-control (ecb*/eca/ecd) bits gated on host/guest
 * facilities, interception controls for storage-key instructions, the
 * CMMA bitmap (if the VM uses CMMA), the clock-comparator timer and the
 * crypto setup.
 *
 * Returns 0 on success or the error from kvm_s390_vcpu_setup_cmma().
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* guarded-storage-style GED bits depend on available facilities */
	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	/* extra ecb bit only with both facilities 50 and 73 present */
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/* vector facility: enable VX interpretation */
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	/* intercept the storage-key instructions (ISKE/SSKE/RRBE) */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1444
1445struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1446 unsigned int id)
1447{
Carsten Otte4d475552011-10-18 12:27:12 +02001448 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001449 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001450 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001451
Carsten Otte4d475552011-10-18 12:27:12 +02001452 if (id >= KVM_MAX_VCPUS)
1453 goto out;
1454
1455 rc = -ENOMEM;
1456
Michael Muellerb110fea2013-06-12 13:54:54 +02001457 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001458 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001459 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001460
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001461 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1462 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001463 goto out_free_cpu;
1464
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001465 vcpu->arch.sie_block = &sie_page->sie_block;
1466 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
Eric Farman68c55752014-06-09 10:57:26 -04001467 vcpu->arch.host_vregs = &sie_page->vregs;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001468
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001469 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001470 if (!kvm_is_ucontrol(kvm)) {
1471 if (!kvm->arch.sca) {
1472 WARN_ON_ONCE(1);
1473 goto out_free_cpu;
1474 }
1475 if (!kvm->arch.sca->cpu[id].sda)
1476 kvm->arch.sca->cpu[id].sda =
1477 (__u64) vcpu->arch.sie_block;
1478 vcpu->arch.sie_block->scaoh =
1479 (__u32)(((__u64)kvm->arch.sca) >> 32);
1480 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1481 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1482 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001483
Carsten Otteba5c1e92008-03-25 18:47:26 +01001484 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001485 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001486 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001487 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001488
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001489 rc = kvm_vcpu_init(vcpu, kvm, id);
1490 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001491 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001492 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1493 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001494 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001495
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001496 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001497out_free_sie_block:
1498 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001499out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001500 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001501out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001502 return ERR_PTR(rc);
1503}
1504
/*
 * A vcpu is considered runnable when an interrupt is pending for it;
 * the second argument (0) applies no exclusion mask to the check.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1509
/*
 * Prevent the vcpu from (re-)entering SIE: set PROG_BLOCK_SIE in the
 * SIE control block, then kick the vcpu out of SIE and wait until it
 * has actually left (see exit_sie()).
 */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1515
/* Undo kvm_s390_vcpu_block(): allow the vcpu to enter SIE again. */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1520
/*
 * Mark a request as pending in the SIE control block (PROG_REQUEST) and
 * kick the vcpu out of SIE so the request is noticed before re-entry.
 */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1526
/* Clear PROG_REQUEST again once the pending requests have been processed. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
1531
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* Request a stop intercept, then busy-wait until the vcpu has left SIE
	 * (prog0c/PROG_IN_SIE is maintained by the SIE entry/exit path). */
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1542
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	/* Queue the request first, then force the vcpu out of SIE so it is
	 * guaranteed to see the request before the next guest entry. */
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
1549
/*
 * gmap invalidation notifier: called when a guest mapping at @address is
 * invalidated.  If the address covers a vcpu's prefix area, ask that vcpu
 * to re-arm the prefix-page protection via KVM_REQ_MMU_RELOAD.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages (the prefix area spans
		 * two consecutive 4k pages, hence masking bit 0x1000) */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
1564
/*
 * Unused on s390: generic KVM code references this hook but the s390
 * kick path goes through exit_sie() instead, so reaching here is a bug.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1571
Carsten Otte14eebd92012-05-15 14:15:26 +02001572static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1573 struct kvm_one_reg *reg)
1574{
1575 int r = -EINVAL;
1576
1577 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001578 case KVM_REG_S390_TODPR:
1579 r = put_user(vcpu->arch.sie_block->todpr,
1580 (u32 __user *)reg->addr);
1581 break;
1582 case KVM_REG_S390_EPOCHDIFF:
1583 r = put_user(vcpu->arch.sie_block->epoch,
1584 (u64 __user *)reg->addr);
1585 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001586 case KVM_REG_S390_CPU_TIMER:
1587 r = put_user(vcpu->arch.sie_block->cputm,
1588 (u64 __user *)reg->addr);
1589 break;
1590 case KVM_REG_S390_CLOCK_COMP:
1591 r = put_user(vcpu->arch.sie_block->ckc,
1592 (u64 __user *)reg->addr);
1593 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001594 case KVM_REG_S390_PFTOKEN:
1595 r = put_user(vcpu->arch.pfault_token,
1596 (u64 __user *)reg->addr);
1597 break;
1598 case KVM_REG_S390_PFCOMPARE:
1599 r = put_user(vcpu->arch.pfault_compare,
1600 (u64 __user *)reg->addr);
1601 break;
1602 case KVM_REG_S390_PFSELECT:
1603 r = put_user(vcpu->arch.pfault_select,
1604 (u64 __user *)reg->addr);
1605 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001606 case KVM_REG_S390_PP:
1607 r = put_user(vcpu->arch.sie_block->pp,
1608 (u64 __user *)reg->addr);
1609 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001610 case KVM_REG_S390_GBEA:
1611 r = put_user(vcpu->arch.sie_block->gbea,
1612 (u64 __user *)reg->addr);
1613 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001614 default:
1615 break;
1616 }
1617
1618 return r;
1619}
1620
/*
 * KVM_SET_ONE_REG: read a single s390-specific register value from user
 * space into the vcpu.  Returns 0 on success, -EFAULT if the user copy
 * fails and -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		/* TOD programmable register is only 32 bits wide */
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token disables pfault handling: drop any
		 * completions that are still queued for the old token */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001671
/* KVM_S390_INITIAL_RESET ioctl: reset the vcpu to its initial state. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1677
/* Copy all 16 general purpose registers from user space into the
 * shared kvm_run register area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1683
/* Copy all 16 general purpose registers from the shared kvm_run
 * register area out to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1689
/*
 * Set access registers (into the kvm_run area) and control registers
 * (into the SIE block).  The access registers are reloaded into the
 * hardware immediately afterwards.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1698
/* Read the vcpu's access and control registers out to user space. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1706
/*
 * Set the guest floating point registers and FP control register.
 * The FPC is validated *before* any state is copied so that a bad
 * value leaves the guest FP state untouched.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* load the new values into the hardware registers right away */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1717
/* Read the guest floating point registers and FPC out to user space. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1724
1725static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1726{
1727 int rc = 0;
1728
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001729 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001730 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001731 else {
1732 vcpu->run->psw_mask = psw.mask;
1733 vcpu->run->psw_addr = psw.addr;
1734 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001735 return rc;
1736}
1737
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1743
/* Debug flags user space may pass via KVM_SET_GUEST_DEBUG. */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * Enable or disable guest debugging.  Existing breakpoint data is always
 * cleared first; on any failure the function rolls back to a fully
 * disabled debug state (no PER, no breakpoints).
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoint data failed: roll everything back */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1779
/* Report the vcpu's multiprocessing state (STOPPED or OPERATING only). */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
1787
1788int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1789 struct kvm_mp_state *mp_state)
1790{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001791 int rc = 0;
1792
1793 /* user space knows about this interface - let it control the state */
1794 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1795
1796 switch (mp_state->mp_state) {
1797 case KVM_MP_STATE_STOPPED:
1798 kvm_s390_vcpu_stop(vcpu);
1799 break;
1800 case KVM_MP_STATE_OPERATING:
1801 kvm_s390_vcpu_start(vcpu);
1802 break;
1803 case KVM_MP_STATE_LOAD:
1804 case KVM_MP_STATE_CHECK_STOP:
1805 /* fall through - CHECK_STOP and LOAD are not supported yet */
1806 default:
1807 rc = -ENXIO;
1808 }
1809
1810 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001811}
1812
/* True if the IBS (interlock-and-broadcast-suppression) flag is set. */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1817
/*
 * Process all requests queued for this vcpu before entering SIE.
 * After any state change we restart the whole loop ("goto retry") since
 * new requests may have been queued meanwhile.  Returns 0 on success or
 * a negative error code that aborts the guest entry.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	/* clear PROG_REQUEST first; anyone queueing afterwards will kick us */
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* force SIE to re-fetch its TLB state on the next entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1869
Thomas Huthfa576c52014-05-06 17:20:16 +02001870/**
1871 * kvm_arch_fault_in_page - fault-in guest page if necessary
1872 * @vcpu: The corresponding virtual cpu
1873 * @gpa: Guest physical address
1874 * @writable: Whether the page should be writable or not
1875 *
1876 * Make sure that a guest page has been faulted-in on the host.
1877 *
1878 * Return: Zero on success, negative error code otherwise.
1879 */
1880long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001881{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001882 return gmap_fault(vcpu->arch.gmap, gpa,
1883 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001884}
1885
/*
 * Inject a pfault notification carrying @token into the guest.
 * A "start" token (PFAULT_INIT) is injected as a local interrupt on the
 * vcpu; a completion (PFAULT_DONE) is injected as a floating interrupt
 * on the vm.  Injection failures are not expected and only warned about.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1902
/* async_pf callback: the page is not present, tell the guest a pfault
 * handling sequence has started (PFAULT_INIT with the work's token). */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1909
/* async_pf callback: the page is now present, complete the pfault
 * sequence in the guest (PFAULT_DONE with the work's token). */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1916
/* async_pf callback: no extra work needed on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1922
/* Always allow the "page present" notification so the generic async_pf
 * completion path runs its cleanup. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1931
/*
 * Try to set up asynchronous handling for the fault at
 * current->thread.gmap_addr.  Returns non-zero if async handling was
 * armed, 0 if the fault must be resolved synchronously instead (any of
 * the guard conditions below fails, or kvm_setup_async_pf() declines).
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handling must be enabled and permitted by the guest state */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	/* don't go async while an interrupt is already pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* NOTE(review): 0x200 in CR0 presumably gates the pfault external
	 * interrupt subclass - confirm against the architecture docs */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* translate the faulting guest address to a host virtual address */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1960
/*
 * Per-iteration preparation before entering SIE: async-pf housekeeping,
 * rescheduling, machine check handling, interrupt delivery and request
 * processing.  Returns 0 when the vcpu may enter SIE, non-zero to abort.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* stash gprs 14/15 in the SIE block (gg14/gg15) for the SIE entry */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests handle interrupt delivery in user space */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
2002
/*
 * Handle a fault that occurred while executing inside SIE by injecting
 * an addressing exception into the guest.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* advance the PSW past the faulting instruction */
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
2027
/*
 * Post-processing after leaving SIE.  @exit_reason >= 0 means a regular
 * SIE exit; negative values indicate a host-side fault.  rc == -1 is
 * used as a sentinel for "fault not yet handled" and resolved through
 * vcpu_post_run_fault_in_sie().  Returns 0 to continue the run loop,
 * negative to leave it (e.g. -EREMOTE/-EOPNOTSUPP to exit to user space).
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let user space resolve the fault for ucontrol guests */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* guest page fault: try async handling, else fault in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	/* copy gg14/gg15 back out of the SIE block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
2074
/*
 * Main vcpu run loop: pre-run, SIE entry, post-run, until an error, a
 * pending signal or a guest-debug exit stops it.  kvm->srcu is held
 * everywhere except while the guest is actually executing in SIE.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2111
/*
 * Copy register state that user space marked dirty (kvm_dirty_regs)
 * from the kvm_run area into the vcpu/SIE block before running.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* invalid token disables pfault: drop queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
2139
/*
 * Copy the vcpu/SIE-block register state back into the kvm_run area so
 * user space sees it after KVM_RUN returns.
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2155
/*
 * KVM_RUN ioctl entry point: sync register state in, run the vcpu loop,
 * translate internal return codes into kvm_run exit reasons, and sync
 * register state back out.  Returns 0 (exit information in kvm_run) or
 * a negative error code (e.g. -EINTR on a pending signal).
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		/* kernel controls the cpu state: implicitly start the vcpu */
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		/* user space controls the state and left the vcpu stopped */
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
2215
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;	/* written to byte 163 for the special cases */
	unsigned int px;
	u64 clkcomp;
	int rc;

	/* Map the two special-case "addresses" to the actual save area. */
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		/* convert the prefix-relative (real) address to an absolute one */
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/*
	 * Store each register set at its offset within struct save_area.
	 * Individual failures are OR-ed into rc and reported collectively
	 * as -EFAULT at the end.
	 */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the clock comparator is stored without its lowest byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2263
/*
 * Store the vcpu status at @addr (see kvm_s390_store_status_unloaded()),
 * after refreshing the saved copies of the lazily switched registers.
 * Returns 0 on success or a negative error code.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2277
Eric Farmanbc17de72014-04-14 16:01:09 -04002278/*
2279 * store additional status at address
2280 */
2281int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2282 unsigned long gpa)
2283{
2284 /* Only bits 0-53 are used for address formation */
2285 if (!(gpa & ~0x3ff))
2286 return 0;
2287
2288 return write_guest_abs(vcpu, gpa & ~0x3ff,
2289 (void *)&vcpu->run->s.regs.vrs, 512);
2290}
2291
/*
 * Store the additional (vector) status at @addr, refreshing the saved
 * VXRS copy first. Returns 0 if facility 129 (vector) is not available
 * to the guest, otherwise the result of the unloaded store.
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2306
/*
 * Ask @vcpu to disable the IBS facility: drop a not-yet-consumed
 * ENABLE_IBS request (if any) before queueing the DISABLE_IBS request.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
2312
2313static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2314{
2315 unsigned int i;
2316 struct kvm_vcpu *vcpu;
2317
2318 kvm_for_each_vcpu(i, vcpu, kvm) {
2319 __disable_ibs_on_vcpu(vcpu);
2320 }
2321}
2322
/*
 * Ask @vcpu to enable the IBS facility: drop a not-yet-consumed
 * DISABLE_IBS request (if any) before queueing the ENABLE_IBS request.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
2328
/*
 * Move @vcpu out of the STOPPED state. IBS is only kept enabled while
 * exactly one vcpu is started, so starting a second vcpu disables it
 * everywhere.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Count the vcpus that are already started (we are not, yet). */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2367
/*
 * Move @vcpu into the STOPPED state. If this leaves exactly one vcpu
 * running, IBS is re-enabled on that remaining vcpu to speed it up.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* Find how many vcpus remain started, remembering the last one. */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2405
Cornelia Huckd6712df2012-12-20 15:32:11 +01002406static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2407 struct kvm_enable_cap *cap)
2408{
2409 int r;
2410
2411 if (cap->flags)
2412 return -EINVAL;
2413
2414 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002415 case KVM_CAP_S390_CSS_SUPPORT:
2416 if (!vcpu->kvm->arch.css_support) {
2417 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002418 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002419 trace_kvm_s390_enable_css(vcpu->kvm);
2420 }
2421 r = 0;
2422 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002423 default:
2424 r = -EINVAL;
2425 break;
2426 }
2427 return r;
2428}
2429
/*
 * Handle the KVM_S390_MEM_OP vcpu ioctl: read or write guest logical
 * memory on behalf of userspace, or only check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.
 *
 * Returns 0 on success, a negative errno for invalid requests or
 * userspace copy failures, or a positive value when the guest access
 * itself failed (in which case a program interrupt is injected if
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION was requested).
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* check-only requests need no bounce buffer */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* guest memory accesses must run under the kvm->srcu read lock */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		/* guest -> bounce buffer -> userspace */
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		/* userspace -> bounce buffer -> guest */
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 means the guest access raised an exception condition */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2488
/*
 * Dispatcher for the s390-specific vcpu ioctls. Each case copies its
 * argument structure from userspace (where one exists) and forwards to
 * the matching handler; unknown ioctls yield -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* legacy interface: convert to a kvm_s390_irq first */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* storing status accesses guest memory -> take srcu */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	/* user-controlled VMs only: manage guest address space mappings */
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of irqs, at least one */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2643
Carsten Otte5b1c1492012-01-04 10:25:23 +01002644int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2645{
2646#ifdef CONFIG_KVM_S390_UCONTROL
2647 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2648 && (kvm_is_ucontrol(vcpu->kvm))) {
2649 vmf->page = virt_to_page(vcpu->arch.sie_block);
2650 get_page(vmf->page);
2651 return 0;
2652 }
2653#endif
2654 return VM_FAULT_SIGBUS;
2655}
2656
/*
 * Arch hook for memslot creation: s390 keeps no per-slot arch metadata,
 * so there is nothing to allocate here.
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2662
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002663/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002664int kvm_arch_prepare_memory_region(struct kvm *kvm,
2665 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002666 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002667 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002668{
Nick Wangdd2887e2013-03-25 17:22:57 +01002669 /* A few sanity checks. We can have memory slots which have to be
2670 located/ended at a segment boundary (1MB). The memory in userland is
2671 ok to be fragmented into various different vmas. It is okay to mmap()
2672 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002673
Carsten Otte598841c2011-07-24 10:48:21 +02002674 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002675 return -EINVAL;
2676
Carsten Otte598841c2011-07-24 10:48:21 +02002677 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002678 return -EINVAL;
2679
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002680 return 0;
2681}
2682
2683void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002684 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002685 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02002686 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002687 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002688{
Carsten Ottef7850c92011-07-24 10:48:23 +02002689 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002690
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002691 /* If the basics of the memslot do not change, we do not want
2692 * to update the gmap. Every update causes several unnecessary
2693 * segment translation exceptions. This is usually handled just
2694 * fine by the normal fault handler + gmap, but it will also
2695 * cause faults on the prefix page of running guest CPUs.
2696 */
2697 if (old->userspace_addr == mem->userspace_addr &&
2698 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2699 old->npages * PAGE_SIZE == mem->memory_size)
2700 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002701
2702 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2703 mem->guest_phys_addr, mem->memory_size);
2704 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002705 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002706 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002707}
2708
/* Module init: register the s390 backend with the generic KVM core. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2713
/* Module exit: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2718
2719module_init(kvm_s390_init);
2720module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002721
2722/*
2723 * Enable autoloading of the kvm module.
2724 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2725 * since x86 takes a different approach.
2726 */
2727#include <linux/miscdevice.h>
2728MODULE_ALIAS_MISCDEV(KVM_MINOR);
2729MODULE_ALIAS("devname:kvm");