/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
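/*
 * Worst case for KVM_S390_GET_IRQ_STATE: an emergency signal can be
 * pending from every possible source VCPU, plus roughly one instance of
 * each remaining local interrupt type.
 */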
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
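/* (STFLE bit numbering: bit 0 is the leftmost bit of the first doubleword) */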
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
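 *
 * The guest TOD clock is the host TOD plus the epoch, so a host clock
 * step of *delta is hidden from the guest by subtracting *delta from
 * each epoch. cputm_start holds a host-TOD timestamp and therefore has
 * to move forward by *delta instead.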
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

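/*
 * PLO with bit 0x100 set in general register 0 works as a "test bit"
 * probe: instead of performing a locked operation it reports via the
 * condition code whether the function code in the low byte of r0 is
 * installed (cc 0 means available).
 */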
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
	int i;

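	/*
	 * Probe all 256 possible PLO function codes. Installed codes are
	 * recorded MSB-first: function code i sets bit 7 - (i & 7) of byte
	 * i / 8, the same layout the CPACF query instructions below use
	 * for their subfunction bitmaps.
	 */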
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
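
/*
 * Illustrative use from user space (the descriptor name is hypothetical):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		err(1, "KVM_S390_ENABLE_SIE");
 */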

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages, i.e. base_gfn .. base_gfn + npages - 1 */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

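	/* Kick every VCPU out of SIE so the new wrapping key masks take effect */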
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
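
/*
 * Illustrative user-space sequence for reading the guest TOD base
 * (vm_fd and gtod are hypothetical names, gtod being a __u64):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64) &gtod,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */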

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
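		/*
		 * lowest_ibc == 0 means the machine has no IBC support;
		 * otherwise clamp the requested value into the range the
		 * machine can actually enforce.
		 */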
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

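	/*
	 * Prefer a physically contiguous buffer, but quietly (__GFP_NOWARN)
	 * fall back to vmalloc for larger counts.
	 */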
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

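/*
 * PQAP(QCI) stores 128 bytes of crypto adapter configuration information;
 * the instruction is emitted as a raw opcode, and the exception table
 * entry lets machines that reject it fall through with cc still 0 and
 * the pre-zeroed config buffer empty.
 */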
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
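	/*
	 * 0xff is a version code that real hardware does not report; it
	 * marks the id as hypervisor-provided.
	 */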
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

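/*
 * The basic SCA fits into one page; the extended SCA spans several pages
 * and therefore has to be freed with free_pages_exact() to match its
 * allocation.
 */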
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
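	/*
	 * Stagger the SCA origin of successive VMs by 16 bytes within the
	 * page, so that the heavily used SCA fields of different guests
	 * end up on different cache lines.
	 */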
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

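	/*
	 * Facility 74 is implemented by KVM itself (see the STHYI rate
	 * limit above and ICTL_OPEREXC in kvm_arch_vcpu_setup()), so it
	 * is announced in both mask and list regardless of the host.
	 */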
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

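/*
 * SCA entry bookkeeping, identical for both formats: mcn is the bitmap
 * of valid CPU entries, cpu[].sda points to the CPU's SIE block. Taking
 * sca_lock for reading suffices here; only the basic-to-extended switch
 * relocates the SCA itself and thus needs the write lock.
 */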
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

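/*
 * Replace the basic SCA with an extended one. All VCPUs are blocked and
 * kicked out of SIE first so that none can run with a stale SCA origin;
 * scaoh/scaol are repointed and ecb2 bit 0x04 (set for the extended
 * format throughout this file) is flagged in every SIE block before the
 * VCPUs are unblocked again.
 */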
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

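/*
 * VCPU ids beyond the basic SCA slots trigger a lazy, one-time switch
 * to the extended SCA, provided the machine supports it.
 */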
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

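/*
 * CPU timer accounting: while a VCPU is loaded and not idle, its guest
 * CPU timer runs against the host TOD clock. cputm_start records the
 * TOD value when accounting began; the elapsed delta is subtracted from
 * the SIE block's cputm when accounting stops. cputm_seqcount lets
 * other threads compute a consistent value concurrently, see
 * kvm_s390_get_cpu_timer().
 */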
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

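/*
 * vcpu_load/put swap the lazy FPU context: the host save area is
 * stashed and current->thread.fpu.regs is repointed at the guest's
 * vector or floating point registers, so the common lazy FPU machinery
 * saves and restores guest state from here on.
 */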
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

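/*
 * Wire the per-VM key wrapping setup into this VCPU's SIE block;
 * facility 76 (message-security-assist extension 3) is required for
 * the guest to use the crypto control block at all.
 */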
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

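/*
 * CMMA (collaborative memory management) bookkeeping: cbrlo points to
 * a zeroed page handed to the hardware for logging guest page state
 * changes. Setting up CMMA enables its interpretation via ecb2 bit
 * 0x80 and clears bit 0x08, which is otherwise only set when PFMF
 * interpretation is available (see kvm_arch_vcpu_setup()).
 */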
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

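/*
 * kvm_arch_vcpu_setup() translates the VM's facility bits and SCLP
 * features into SIE control fields (cpuflags, ecb/ecb2/ecb3, eca, ecd,
 * ictl); the constants below are the architected bit positions of the
 * respective interpretation facilities, e.g. facility 129 (vector)
 * additionally needs eca 0x00020000 and ecd 0x20000000, and facility 64
 * (runtime instrumentation) gates ecb3 0x01 plus the riccb address.
 */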
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (test_kvm_facility(vcpu->kvm, 74))
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

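/*
 * The gmap notifier fires when host mappings of guest pages are
 * invalidated. Only addresses below 2 GB can hold a prefix area, so
 * anything above is ignored; an affected VCPU gets a MMU_RELOAD request
 * to re-arm the ipte notifier for its two prefix pages before it
 * reenters SIE.
 */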
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

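/*
 * The guest TOD is maintained as an epoch difference against the host
 * TOD clock. Updating it blocks all VCPUs and kicks them out of SIE so
 * that every SIE block picks up the same epoch before the guest runs
 * again.
 */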
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

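/*
 * Set up the pfault handshake for the faulting guest address, but only
 * if the guest opted in: valid token, PSW mask matching the registered
 * select/compare values, external interrupts and the corresponding
 * subclass mask bit in CR0 (0x200) enabled, and no interrupt pending.
 * Callers fall back to synchronous fault-in otherwise.
 */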
2397static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2398{
2399 hva_t hva;
2400 struct kvm_arch_async_pf arch;
2401 int rc;
2402
2403 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2404 return 0;
2405 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2406 vcpu->arch.pfault_compare)
2407 return 0;
2408 if (psw_extint_disabled(vcpu))
2409 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002410 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002411 return 0;
2412 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2413 return 0;
2414 if (!vcpu->arch.gmap->pfault_enabled)
2415 return 0;
2416
Heiko Carstens81480cc2014-01-01 16:36:07 +01002417 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2418 hva += current->thread.gmap_addr & ~PAGE_MASK;
2419 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002420 return 0;
2421
2422 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2423 return rc;
2424}
2425
Thomas Huth3fb4c402013-09-12 10:33:43 +02002426static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002427{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002428 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002429
Dominik Dingel3c038e62013-10-07 17:11:48 +02002430 /*
2431 * On s390 notifications for arriving pages will be delivered directly
2432 * to the guest but the house keeping for completed pfaults is
2433 * handled outside the worker.
2434 */
2435 kvm_check_async_pf_completion(vcpu);
2436
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002437 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2438 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002439
2440 if (need_resched())
2441 schedule();
2442
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002443 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002444 s390_handle_mcck();
2445
Jens Freimann79395032014-04-17 10:10:30 +02002446 if (!kvm_is_ucontrol(vcpu->kvm)) {
2447 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2448 if (rc)
2449 return rc;
2450 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002451
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002452 rc = kvm_s390_handle_requests(vcpu);
2453 if (rc)
2454 return rc;
2455
David Hildenbrand27291e22014-01-23 12:26:52 +01002456 if (guestdbg_enabled(vcpu)) {
2457 kvm_s390_backup_guest_per_regs(vcpu);
2458 kvm_s390_patch_guest_per_regs(vcpu);
2459 }
2460
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002461 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002462 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2463 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2464 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002465
Thomas Huth3fb4c402013-09-12 10:33:43 +02002466 return 0;
2467}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
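
/*
 * insn_length() derives the size from the two most significant bits of
 * the first opcode byte (00 -> 2, 01/10 -> 4, 11 -> 6 bytes). E.g. for
 * an opcode byte of 0xb2 the PSW above is forwarded by 4, so the
 * injected addressing exception behaves as a suppressing one.
 */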

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
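
/*
 * Return value convention in vcpu_post_run(): 0 re-enters SIE, -EREMOTE
 * drops to userspace with vcpu->run prepared, any other negative value
 * is an error. The pgm_code 0x10 in the ucontrol branch is a segment
 * translation exception left for userspace to resolve.
 */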

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
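
/*
 * The run loop above follows the usual KVM shape: prepare state, drop
 * srcu, enter SIE via sie64a(), reacquire srcu and post-process,
 * repeating until a signal arrives, guest debugging wants to exit, or
 * vcpu_post_run() returns non-zero.
 */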

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
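
/*
 * Minimal userspace sketch of the register sync protocol handled by
 * sync_regs()/store_regs() above (vcpu_fd, new_prefix and mmap_size --
 * the latter from KVM_GET_VCPU_MMAP_SIZE -- are assumptions of the
 * example):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	// kvm_dirty_regs is cleared by the kernel; run->s.regs now holds
 *	// the state written back by store_regs().
 */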

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
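
/*
 * The layout written above is the architected store-status save area,
 * with the slot offsets taken from the lowcore definitions. rc
 * accumulates the individual write results, so a single -EFAULT is
 * reported if any slot could not be written.
 */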

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
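
/*
 * Only bits 0-53 of the address are significant, i.e. the low 10 bits
 * are ignored: the 512 bytes of vector register contents are stored at
 * the 1 KB aligned address gpa & ~0x3ff, and a masked address of zero
 * means there is nothing to do.
 */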

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save them into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
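
/*
 * Net effect of the start/stop handlers above: IBS is only ever enabled
 * while exactly one VCPU is in the started state. As soon as a second
 * one starts, ENABLE requests are revoked on all VCPUs, and once only
 * one is left running it is re-enabled for that survivor.
 */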

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
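
/*
 * Illustrative userspace use of the mem op handler above, reading guest
 * memory through the vcpu fd (vcpu_fd, guest_addr, buf and len are
 * assumptions of the sketch):
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 *
 * A positive return value indicates a guest access exception; with
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION set it is also injected into the
 * guest, as done right before vfree() above.
 */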

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1 MB). The memory in userland may be fragmented
	   into various different vmas, and it is okay to mmap() and munmap()
	   ranges in this slot after doing this call at any time. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
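
/*
 * Worked example for nonhyp_mask(): the two most significant bits of
 * sclp.hmfai belong to facility word 0, the next pair to word 1, and
 * so on. A pair value of 1 yields 0x0000ffffffffffffUL >> 16, so only
 * the low 32 bits of that facility word survive the mask applied in
 * kvm_s390_init() below.
 */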

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");