// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2017
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

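/*
 * Probe which optional facilities and PLO/PTFF/CPACF subfunctions the host
 * provides and record them for later reporting to user space. SIE-related
 * CPU features are only made available when nested virtualization is enabled.
 */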
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

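/*
 * Report which KVM capabilities are supported on s390; backs the
 * KVM_CHECK_EXTENSION ioctl.
 */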
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

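/*
 * Transfer the dirty state of all guest pages in a memslot from the gmap
 * (guest mapping) into the memslot's dirty bitmap.
 */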
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

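/*
 * Enable an optional, per-VM capability (KVM_ENABLE_CAP). Capabilities that
 * change the CPU model may only be enabled before the first VCPU is created.
 */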
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

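/* Report memory control attributes (KVM_S390_VM_MEM_CTRL group) to user space. */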
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

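/*
 * Configure the guest's AES/DEA key-wrapping masks. All VCPUs are kicked out
 * of SIE afterwards so that they pick up the updated crypto control block.
 */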
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the first slot. They are reverse sorted by base_gfn, so
		 * the first slot is also the one at the end of the address
		 * space. We have verified above that at least one slot is
		 * present.
		 */
		ms = slots->memslots;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

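/*
 * Set the guest TOD clock, including the epoch index when the guest has the
 * multiple-epoch facility (139); without it, only epoch index 0 is accepted.
 */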
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (test_kvm_facility(kvm, 139))
		kvm_s390_set_tod_clock_ext(kvm, &gtod);
	else if (gtod.epoch_idx == 0)
		kvm_s390_set_tod_clock(kvm, gtod.tod);
	else
		return -EINVAL;

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

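/*
 * Read the host TOD clock (extended format) and convert it to the guest view
 * by adding the guest epoch; carry into the epoch index on overflow.
 */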
static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
					struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;

	if (gtod->tod < htod.tod)
		gtod->epoch_idx += 1;

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));

	if (test_kvm_facility(kvm, 139))
		kvm_s390_get_tod_clock_ext(kvm, &gtod);
	else
		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);

	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

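/*
 * Set the guest CPU model (cpuid, ibc, facility list) from user space. This
 * is only possible as long as no VCPU has been created.
 */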
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

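/* Dispatch KVM_SET_DEVICE_ATTR on the VM fd to the per-group handlers. */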
Dominik Dingelf2061652014-04-09 13:13:00 +02001268static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1269{
1270 int ret;
1271
1272 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001273 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001274 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001275 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001276 case KVM_S390_VM_TOD:
1277 ret = kvm_s390_set_tod(kvm, attr);
1278 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001279 case KVM_S390_VM_CPU_MODEL:
1280 ret = kvm_s390_set_cpu_model(kvm, attr);
1281 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001282 case KVM_S390_VM_CRYPTO:
1283 ret = kvm_s390_vm_set_crypto(kvm, attr);
1284 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001285 case KVM_S390_VM_MIGRATION:
1286 ret = kvm_s390_vm_set_migration(kvm, attr);
1287 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001288 default:
1289 ret = -ENXIO;
1290 break;
1291 }
1292
1293 return ret;
1294}
1295
1296static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1297{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001298 int ret;
1299
1300 switch (attr->group) {
1301 case KVM_S390_VM_MEM_CTRL:
1302 ret = kvm_s390_get_mem_control(kvm, attr);
1303 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001304 case KVM_S390_VM_TOD:
1305 ret = kvm_s390_get_tod(kvm, attr);
1306 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001307 case KVM_S390_VM_CPU_MODEL:
1308 ret = kvm_s390_get_cpu_model(kvm, attr);
1309 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001310 case KVM_S390_VM_MIGRATION:
1311 ret = kvm_s390_vm_get_migration(kvm, attr);
1312 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001313 default:
1314 ret = -ENXIO;
1315 break;
1316 }
1317
1318 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001319}
1320
1321static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1322{
1323 int ret;
1324
1325 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001326 case KVM_S390_VM_MEM_CTRL:
1327 switch (attr->attr) {
1328 case KVM_S390_VM_MEM_ENABLE_CMMA:
1329 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001330 ret = sclp.has_cmma ? 0 : -ENXIO;
1331 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001332 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001333 ret = 0;
1334 break;
1335 default:
1336 ret = -ENXIO;
1337 break;
1338 }
1339 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001340 case KVM_S390_VM_TOD:
1341 switch (attr->attr) {
1342 case KVM_S390_VM_TOD_LOW:
1343 case KVM_S390_VM_TOD_HIGH:
1344 ret = 0;
1345 break;
1346 default:
1347 ret = -ENXIO;
1348 break;
1349 }
1350 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001351 case KVM_S390_VM_CPU_MODEL:
1352 switch (attr->attr) {
1353 case KVM_S390_VM_CPU_PROCESSOR:
1354 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001355 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1356 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001357 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001358 ret = 0;
1359 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001360 /* configuring subfunctions is not supported yet */
1361 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001362 default:
1363 ret = -ENXIO;
1364 break;
1365 }
1366 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001367 case KVM_S390_VM_CRYPTO:
1368 switch (attr->attr) {
1369 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1370 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1371 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1372 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1373 ret = 0;
1374 break;
1375 default:
1376 ret = -ENXIO;
1377 break;
1378 }
1379 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001380 case KVM_S390_VM_MIGRATION:
1381 ret = 0;
1382 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001383 default:
1384 ret = -ENXIO;
1385 break;
1386 }
1387
1388 return ret;
1389}
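/*
 * kvm_s390_vm_has_attr() backs KVM_HAS_DEVICE_ATTR: it only reports whether
 * a group/attr pair is supported (0) or not (-ENXIO) without touching any
 * state, so userspace can probe optional features such as
 * KVM_S390_VM_CPU_PROCESSOR_SUBFUNC before trying to use them.
 */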
1390
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001391static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1392{
1393 uint8_t *keys;
1394 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001395 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001396
1397 if (args->flags != 0)
1398 return -EINVAL;
1399
1400 /* Is this guest using storage keys? */
1401 if (!mm_use_skey(current->mm))
1402 return KVM_S390_GET_SKEYS_NONE;
1403
1404 /* Enforce sane limit on memory allocation */
1405 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1406 return -EINVAL;
1407
Michal Hocko752ade62017-05-08 15:57:27 -07001408 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001409 if (!keys)
1410 return -ENOMEM;
1411
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001412 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001413 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001414 for (i = 0; i < args->count; i++) {
1415 hva = gfn_to_hva(kvm, args->start_gfn + i);
1416 if (kvm_is_error_hva(hva)) {
1417 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001418 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001419 }
1420
David Hildenbrand154c8c12016-05-09 11:22:34 +02001421 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1422 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001423 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001424 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001425 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001426 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001427
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001428 if (!r) {
1429 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1430 sizeof(uint8_t) * args->count);
1431 if (r)
1432 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001433 }
1434
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001435 kvfree(keys);
1436 return r;
1437}
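/*
 * Storage-key export: if the guest never enabled storage keys
 * (mm_use_skey() is false), KVM_S390_GET_SKEYS_NONE is returned and no
 * buffer is filled. Otherwise one key byte per guest frame, starting at
 * start_gfn, is read under mmap_sem + kvm->srcu and copied out to
 * skeydata_addr in one chunk at the end.
 */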
1438
1439static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1440{
1441 uint8_t *keys;
1442 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001443 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001444
1445 if (args->flags != 0)
1446 return -EINVAL;
1447
1448 /* Enforce sane limit on memory allocation */
1449 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1450 return -EINVAL;
1451
Michal Hocko752ade62017-05-08 15:57:27 -07001452 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001453 if (!keys)
1454 return -ENOMEM;
1455
1456 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1457 sizeof(uint8_t) * args->count);
1458 if (r) {
1459 r = -EFAULT;
1460 goto out;
1461 }
1462
1463 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001464 r = s390_enable_skey();
1465 if (r)
1466 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001467
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001468 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001469 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001470 for (i = 0; i < args->count; i++) {
1471 hva = gfn_to_hva(kvm, args->start_gfn + i);
1472 if (kvm_is_error_hva(hva)) {
1473 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001474 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001475 }
1476
1477 /* Lowest order bit is reserved */
1478 if (keys[i] & 0x01) {
1479 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001480 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001481 }
1482
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001483 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001484 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001485 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001486 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001487 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001488 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001489out:
1490 kvfree(keys);
1491 return r;
1492}
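/*
 * Storage-key import: the keys are validated (the lowest bit of each key
 * byte is reserved and rejected with -EINVAL), storage-key handling is
 * switched on via s390_enable_skey(), and the keys are then applied page by
 * page with set_guest_storage_key().
 */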
1493
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001494/*
1495 * Base address and length must be sent at the start of each block, therefore
1496 * it's cheaper to send some clean data, as long as it's less than the size of
1497 * two longs.
1498 */
1499#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1500/* for consistency */
1501#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1502
1503/*
1504 * This function searches for the next page with dirty CMMA attributes, and
1505 * saves the attributes in the buffer up to either the end of the buffer or
1506 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1507 * no trailing clean bytes are saved.
1508 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1509 * output buffer will indicate 0 as length.
1510 */
1511static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1512 struct kvm_s390_cmma_log *args)
1513{
1514 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1515 unsigned long bufsize, hva, pgstev, i, next, cur;
1516 int srcu_idx, peek, r = 0, rr;
1517 u8 *res;
1518
1519 cur = args->start_gfn;
1520 i = next = pgstev = 0;
1521
1522 if (unlikely(!kvm->arch.use_cmma))
1523 return -ENXIO;
1524 /* Invalid/unsupported flags were specified */
1525 if (args->flags & ~KVM_S390_CMMA_PEEK)
1526 return -EINVAL;
1527 /* Migration mode query, and we are not doing a migration */
1528 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1529 if (!peek && !s)
1530 return -EINVAL;
1531 /* CMMA is disabled or was not used, or the buffer has length zero */
1532 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1533 if (!bufsize || !kvm->mm->context.use_cmma) {
1534 memset(args, 0, sizeof(*args));
1535 return 0;
1536 }
1537
1538 if (!peek) {
1539 /* We are not peeking, and there are no dirty pages */
1540 if (!atomic64_read(&s->dirty_pages)) {
1541 memset(args, 0, sizeof(*args));
1542 return 0;
1543 }
1544 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1545 args->start_gfn);
1546 if (cur >= s->bitmap_size) /* nothing found, loop back */
1547 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1548 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1549 memset(args, 0, sizeof(*args));
1550 return 0;
1551 }
1552 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1553 }
1554
1555 res = vmalloc(bufsize);
1556 if (!res)
1557 return -ENOMEM;
1558
1559 args->start_gfn = cur;
1560
1561 down_read(&kvm->mm->mmap_sem);
1562 srcu_idx = srcu_read_lock(&kvm->srcu);
1563 while (i < bufsize) {
1564 hva = gfn_to_hva(kvm, cur);
1565 if (kvm_is_error_hva(hva)) {
1566 r = -EFAULT;
1567 break;
1568 }
1569 /* decrement only if we actually flipped the bit to 0 */
1570 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1571 atomic64_dec(&s->dirty_pages);
1572 r = get_pgste(kvm->mm, hva, &pgstev);
1573 if (r < 0)
1574 pgstev = 0;
1575 /* save the value */
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001576 res[i++] = (pgstev >> 24) & 0x43;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001577 /*
1578 * If the next dirty bit is too far away, stop.
1579 * If we reached the previous "next", look up the following one.
1580 */
1581 if (!peek) {
1582 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1583 break;
1584 if (cur == next)
1585 next = find_next_bit(s->pgste_bitmap,
1586 s->bitmap_size, cur + 1);
1587 /* reached the end of the bitmap or of the buffer, stop */
1588 if ((next >= s->bitmap_size) ||
1589 (next >= args->start_gfn + bufsize))
1590 break;
1591 }
1592 cur++;
1593 }
1594 srcu_read_unlock(&kvm->srcu, srcu_idx);
1595 up_read(&kvm->mm->mmap_sem);
1596 args->count = i;
1597 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1598
1599 rr = copy_to_user((void __user *)args->values, res, args->count);
1600 if (rr)
1601 r = -EFAULT;
1602
1603 vfree(res);
1604 return r;
1605}
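/*
 * Illustrative only, not part of this file: a migration tool would typically
 * call KVM_S390_GET_CMMA_BITS in a loop. A sketch with error handling
 * omitted; vm_fd, buf and buf_size are hypothetical and migration mode must
 * have been enabled via the KVM_S390_VM_MIGRATION attribute unless
 * KVM_S390_CMMA_PEEK is set:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.flags = 0,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	do {
 *		log.count = buf_size;
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		// buf now holds log.count CMMA values for the frames starting
 *		// at log.start_gfn (updated by the kernel to the first dirty one)
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */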
1606
1607/*
1608 * This function sets the CMMA attributes for the given pages. If the input
1609 * buffer has zero length, no action is taken, otherwise the attributes are
1610 * set and the mm->context.use_cmma flag is set.
1611 */
1612static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1613 const struct kvm_s390_cmma_log *args)
1614{
1615 unsigned long hva, mask, pgstev, i;
1616 uint8_t *bits;
1617 int srcu_idx, r = 0;
1618
1619 mask = args->mask;
1620
1621 if (!kvm->arch.use_cmma)
1622 return -ENXIO;
1623 /* invalid/unsupported flags */
1624 if (args->flags != 0)
1625 return -EINVAL;
1626 /* Enforce sane limit on memory allocation */
1627 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1628 return -EINVAL;
1629 /* Nothing to do */
1630 if (args->count == 0)
1631 return 0;
1632
1633 bits = vmalloc(sizeof(*bits) * args->count);
1634 if (!bits)
1635 return -ENOMEM;
1636
1637 r = copy_from_user(bits, (void __user *)args->values, args->count);
1638 if (r) {
1639 r = -EFAULT;
1640 goto out;
1641 }
1642
1643 down_read(&kvm->mm->mmap_sem);
1644 srcu_idx = srcu_read_lock(&kvm->srcu);
1645 for (i = 0; i < args->count; i++) {
1646 hva = gfn_to_hva(kvm, args->start_gfn + i);
1647 if (kvm_is_error_hva(hva)) {
1648 r = -EFAULT;
1649 break;
1650 }
1651
1652 pgstev = bits[i];
1653 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001654 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001655 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1656 }
1657 srcu_read_unlock(&kvm->srcu, srcu_idx);
1658 up_read(&kvm->mm->mmap_sem);
1659
1660 if (!kvm->mm->context.use_cmma) {
1661 down_write(&kvm->mm->mmap_sem);
1662 kvm->mm->context.use_cmma = 1;
1663 up_write(&kvm->mm->mmap_sem);
1664 }
1665out:
1666 vfree(bits);
1667 return r;
1668}
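/*
 * Counterpart used on the migration target: KVM_S390_SET_CMMA_BITS takes the
 * values produced above and re-applies them via set_pgste_bits(); the first
 * successful call also sets mm->context.use_cmma, so CMMA handling is active
 * for the restored guest.
 */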
1669
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001670long kvm_arch_vm_ioctl(struct file *filp,
1671 unsigned int ioctl, unsigned long arg)
1672{
1673 struct kvm *kvm = filp->private_data;
1674 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001675 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001676 int r;
1677
1678 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001679 case KVM_S390_INTERRUPT: {
1680 struct kvm_s390_interrupt s390int;
1681
1682 r = -EFAULT;
1683 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1684 break;
1685 r = kvm_s390_inject_vm(kvm, &s390int);
1686 break;
1687 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001688 case KVM_ENABLE_CAP: {
1689 struct kvm_enable_cap cap;
1690 r = -EFAULT;
1691 if (copy_from_user(&cap, argp, sizeof(cap)))
1692 break;
1693 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1694 break;
1695 }
Cornelia Huck84223592013-07-15 13:36:01 +02001696 case KVM_CREATE_IRQCHIP: {
1697 struct kvm_irq_routing_entry routing;
1698
1699 r = -EINVAL;
1700 if (kvm->arch.use_irqchip) {
1701 /* Set up dummy routing. */
1702 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001703 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001704 }
1705 break;
1706 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001707 case KVM_SET_DEVICE_ATTR: {
1708 r = -EFAULT;
1709 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1710 break;
1711 r = kvm_s390_vm_set_attr(kvm, &attr);
1712 break;
1713 }
1714 case KVM_GET_DEVICE_ATTR: {
1715 r = -EFAULT;
1716 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1717 break;
1718 r = kvm_s390_vm_get_attr(kvm, &attr);
1719 break;
1720 }
1721 case KVM_HAS_DEVICE_ATTR: {
1722 r = -EFAULT;
1723 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1724 break;
1725 r = kvm_s390_vm_has_attr(kvm, &attr);
1726 break;
1727 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001728 case KVM_S390_GET_SKEYS: {
1729 struct kvm_s390_skeys args;
1730
1731 r = -EFAULT;
1732 if (copy_from_user(&args, argp,
1733 sizeof(struct kvm_s390_skeys)))
1734 break;
1735 r = kvm_s390_get_skeys(kvm, &args);
1736 break;
1737 }
1738 case KVM_S390_SET_SKEYS: {
1739 struct kvm_s390_skeys args;
1740
1741 r = -EFAULT;
1742 if (copy_from_user(&args, argp,
1743 sizeof(struct kvm_s390_skeys)))
1744 break;
1745 r = kvm_s390_set_skeys(kvm, &args);
1746 break;
1747 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001748 case KVM_S390_GET_CMMA_BITS: {
1749 struct kvm_s390_cmma_log args;
1750
1751 r = -EFAULT;
1752 if (copy_from_user(&args, argp, sizeof(args)))
1753 break;
1754 r = kvm_s390_get_cmma_bits(kvm, &args);
1755 if (!r) {
1756 r = copy_to_user(argp, &args, sizeof(args));
1757 if (r)
1758 r = -EFAULT;
1759 }
1760 break;
1761 }
1762 case KVM_S390_SET_CMMA_BITS: {
1763 struct kvm_s390_cmma_log args;
1764
1765 r = -EFAULT;
1766 if (copy_from_user(&args, argp, sizeof(args)))
1767 break;
1768 r = kvm_s390_set_cmma_bits(kvm, &args);
1769 break;
1770 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001771 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001772 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001773 }
1774
1775 return r;
1776}
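/*
 * VM ioctl dispatch: everything above funnels through here. Unknown ioctls
 * return -ENOTTY, unknown device attributes -ENXIO, so userspace can tell an
 * unsupported ioctl apart from an unsupported attribute.
 */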
1777
Tony Krowiak45c9b472015-01-13 11:33:26 -05001778static int kvm_s390_query_ap_config(u8 *config)
1779{
1780 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001781 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001782
Christian Borntraeger86044c82015-02-26 13:53:47 +01001783 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05001784 asm volatile(
1785 "lgr 0,%1\n"
1786 "lgr 2,%2\n"
1787 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001788 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001789 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001790 "1:\n"
1791 EX_TABLE(0b, 1b)
1792 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001793 : "r" (fcn_code), "r" (config)
1794 : "cc", "0", "2", "memory"
1795 );
1796
1797 return cc;
1798}
1799
1800static int kvm_s390_apxa_installed(void)
1801{
1802 u8 config[128];
1803 int cc;
1804
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001805 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001806 cc = kvm_s390_query_ap_config(config);
1807
1808 if (cc)
1809 pr_err("PQAP(QCI) failed with cc=%d", cc);
1810 else
1811 return config[0] & 0x40;
1812 }
1813
1814 return 0;
1815}
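/*
 * APXA detection: when test_facility(12) (AP query configuration
 * information) is set, PQAP(QCI) fills the 128-byte config block and bit
 * 0x40 of byte 0 tells whether APXA is installed; a failing query is
 * reported via a non-zero condition code and logged above.
 */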
1816
1817static void kvm_s390_set_crycb_format(struct kvm *kvm)
1818{
1819 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1820
1821 if (kvm_s390_apxa_installed())
1822 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1823 else
1824 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1825}
1826
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001827static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001828{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001829 struct cpuid cpuid;
1830
1831 get_cpu_id(&cpuid);
1832 cpuid.version = 0xff;
1833 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001834}
1835
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001836static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04001837{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001838 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001839 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001840
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001841 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001842 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001843
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001844 /* Enable AES/DEA protected key functions by default */
1845 kvm->arch.crypto.aes_kw = 1;
1846 kvm->arch.crypto.dea_kw = 1;
1847 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1848 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1849 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1850 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04001851}
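/*
 * Crypto setup at VM creation: only done when facility 76 (message-security-
 * assist extension 3) is made available to the guest. AES and DEA
 * protected-key wrapping is enabled by default and the wrapping-key masks
 * are seeded with fresh random bytes, so every VM starts with its own
 * wrapping keys.
 */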
1852
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001853static void sca_dispose(struct kvm *kvm)
1854{
1855 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001856 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001857 else
1858 free_page((unsigned long)(kvm->arch.sca));
1859 kvm->arch.sca = NULL;
1860}
1861
Carsten Ottee08b9632012-01-04 10:25:20 +01001862int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001863{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001864 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001865 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001866 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001867 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001868
Carsten Ottee08b9632012-01-04 10:25:20 +01001869 rc = -EINVAL;
1870#ifdef CONFIG_KVM_S390_UCONTROL
1871 if (type & ~KVM_VM_S390_UCONTROL)
1872 goto out_err;
1873 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1874 goto out_err;
1875#else
1876 if (type)
1877 goto out_err;
1878#endif
1879
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001880 rc = s390_enable_sie();
1881 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001882 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001883
Carsten Otteb2904112011-10-18 12:27:13 +02001884 rc = -ENOMEM;
1885
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001886 kvm->arch.use_esca = 0; /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001887 if (!sclp.has_64bscao)
1888 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001889 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001890 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001891 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001892 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001893 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001894 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001895 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001896 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001897 kvm->arch.sca = (struct bsca_block *)
1898 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001899 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001900
1901 sprintf(debug_name, "kvm-%u", current->pid);
1902
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02001903 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001904 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001905 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001906
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001907 kvm->arch.sie_page2 =
1908 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1909 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001910 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001911
Michael Muellerfb5bf932015-02-27 14:25:10 +01001912 /* Populate the facility mask initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001913 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001914 sizeof(S390_lowcore.stfle_fac_list));
Michael Mueller9d8d5782015-02-02 15:42:51 +01001915 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1916 if (i < kvm_s390_fac_list_mask_size())
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001917 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001918 else
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001919 kvm->arch.model.fac_mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001920 }
1921
Michael Mueller981467c2015-02-24 13:51:04 +01001922 /* Populate the facility list initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001923 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1924 memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001925 S390_ARCH_FAC_LIST_SIZE_BYTE);
1926
David Hildenbrand19352222017-08-29 16:31:08 +02001927 /* we are always in czam mode - even on pre z14 machines */
1928 set_kvm_facility(kvm->arch.model.fac_mask, 138);
1929 set_kvm_facility(kvm->arch.model.fac_list, 138);
1930 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001931 set_kvm_facility(kvm->arch.model.fac_mask, 74);
1932 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001933 if (MACHINE_HAS_TLB_GUEST) {
1934 set_kvm_facility(kvm->arch.model.fac_mask, 147);
1935 set_kvm_facility(kvm->arch.model.fac_list, 147);
1936 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001937
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001938 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001939 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001940
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001941 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001942
Fei Li51978392017-02-17 17:06:26 +08001943 mutex_init(&kvm->arch.float_int.ais_lock);
1944 kvm->arch.float_int.simm = 0;
1945 kvm->arch.float_int.nimm = 0;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001946 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001947 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1948 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001949 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001950 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001951
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001952 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001953 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001954
Carsten Ottee08b9632012-01-04 10:25:20 +01001955 if (type & KVM_VM_S390_UCONTROL) {
1956 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01001957 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01001958 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001959 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001960 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001961 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001962 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001963 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001964 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001965 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001966 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001967 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001968 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001969 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001970
1971 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001972 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001973 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001974
David Hildenbrand8ad35752014-03-14 11:00:21 +01001975 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001976 kvm_s390_vsie_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001977 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001978
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001979 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001980out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001981 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01001982 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001983 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001984 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001985 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001986}
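/*
 * kvm_arch_init_vm() in short: allocate a basic SCA (staggered within the
 * page via sca_offset), register the debug feature, populate the facility
 * mask/list from the host's STFLE data clamped by kvm_s390_fac_list_mask,
 * force the facilities KVM provides itself (74/STHYI, 138/czam, and 147 when
 * the host has TLB guest support), set up crypto, and create the gmap unless
 * this is a ucontrol VM that manages its own address spaces.
 */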
1987
Luiz Capitulino235539b2016-09-07 14:47:23 -04001988bool kvm_arch_has_vcpu_debugfs(void)
1989{
1990 return false;
1991}
1992
1993int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
1994{
1995 return 0;
1996}
1997
Christian Borntraegerd329c032008-11-26 14:50:27 +01001998void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1999{
2000 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002001 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002002 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002003 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002004 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002005 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002006
2007 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002008 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002009
Dominik Dingele6db1d62015-05-07 15:41:57 +02002010 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002011 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002012 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002013
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002014 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002015 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002016}
2017
2018static void kvm_free_vcpus(struct kvm *kvm)
2019{
2020 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002021 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002022
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002023 kvm_for_each_vcpu(i, vcpu, kvm)
2024 kvm_arch_vcpu_destroy(vcpu);
2025
2026 mutex_lock(&kvm->lock);
2027 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2028 kvm->vcpus[i] = NULL;
2029
2030 atomic_set(&kvm->online_vcpus, 0);
2031 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002032}
2033
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002034void kvm_arch_destroy_vm(struct kvm *kvm)
2035{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002036 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002037 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002038 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002039 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002040 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002041 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002042 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002043 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002044 kvm_s390_vsie_destroy(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002045 if (kvm->arch.migration_state) {
2046 vfree(kvm->arch.migration_state->pgste_bitmap);
2047 kfree(kvm->arch.migration_state);
2048 }
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002049 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002050}
2051
2052/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002053static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2054{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002055 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002056 if (!vcpu->arch.gmap)
2057 return -ENOMEM;
2058 vcpu->arch.gmap->private = vcpu->kvm;
2059
2060 return 0;
2061}
2062
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002063static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2064{
David Hildenbranda6940672016-08-08 22:39:32 +02002065 if (!kvm_s390_use_sca_entries())
2066 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002067 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002068 if (vcpu->kvm->arch.use_esca) {
2069 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002070
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002071 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002072 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002073 } else {
2074 struct bsca_block *sca = vcpu->kvm->arch.sca;
2075
2076 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002077 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002078 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002079 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002080}
2081
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002082static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002083{
David Hildenbranda6940672016-08-08 22:39:32 +02002084 if (!kvm_s390_use_sca_entries()) {
2085 struct bsca_block *sca = vcpu->kvm->arch.sca;
2086
2087 /* we still need the basic sca for the ipte control */
2088 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2089 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2090 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002091 read_lock(&vcpu->kvm->arch.sca_lock);
2092 if (vcpu->kvm->arch.use_esca) {
2093 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002094
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002095 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002096 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2097 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002098 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002099 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002100 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002101 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002102
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002103 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002104 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2105 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002106 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002107 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002108 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002109}
2110
2111/* Basic SCA to Extended SCA data copy routines */
2112static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2113{
2114 d->sda = s->sda;
2115 d->sigp_ctrl.c = s->sigp_ctrl.c;
2116 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2117}
2118
2119static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2120{
2121 int i;
2122
2123 d->ipte_control = s->ipte_control;
2124 d->mcn[0] = s->mcn;
2125 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2126 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2127}
2128
2129static int sca_switch_to_extended(struct kvm *kvm)
2130{
2131 struct bsca_block *old_sca = kvm->arch.sca;
2132 struct esca_block *new_sca;
2133 struct kvm_vcpu *vcpu;
2134 unsigned int vcpu_idx;
2135 u32 scaol, scaoh;
2136
2137 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2138 if (!new_sca)
2139 return -ENOMEM;
2140
2141 scaoh = (u32)((u64)(new_sca) >> 32);
2142 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2143
2144 kvm_s390_vcpu_block_all(kvm);
2145 write_lock(&kvm->arch.sca_lock);
2146
2147 sca_copy_b_to_e(new_sca, old_sca);
2148
2149 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2150 vcpu->arch.sie_block->scaoh = scaoh;
2151 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002152 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002153 }
2154 kvm->arch.sca = new_sca;
2155 kvm->arch.use_esca = 1;
2156
2157 write_unlock(&kvm->arch.sca_lock);
2158 kvm_s390_vcpu_unblock_all(kvm);
2159
2160 free_page((unsigned long)old_sca);
2161
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002162 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2163 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002164 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002165}
2166
2167static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2168{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002169 int rc;
2170
David Hildenbranda6940672016-08-08 22:39:32 +02002171 if (!kvm_s390_use_sca_entries()) {
2172 if (id < KVM_MAX_VCPUS)
2173 return true;
2174 return false;
2175 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002176 if (id < KVM_S390_BSCA_CPU_SLOTS)
2177 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002178 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002179 return false;
2180
2181 mutex_lock(&kvm->lock);
2182 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2183 mutex_unlock(&kvm->lock);
2184
2185 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002186}
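/*
 * SCA sizing: vcpu ids below KVM_S390_BSCA_CPU_SLOTS fit into the basic SCA
 * the VM starts with; higher ids are only allowed when the machine has ESCA
 * and 64-bit SCAO support, in which case sca_switch_to_extended() converts
 * the VM under kvm->lock with all vcpus blocked, rewriting scaoh/scaol and
 * setting ECB2_ESCA in every SIE block.
 */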
2187
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002188int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2189{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002190 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2191 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002192 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2193 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002194 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002195 KVM_SYNC_CRS |
2196 KVM_SYNC_ARCH0 |
2197 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002198 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002199 if (test_kvm_facility(vcpu->kvm, 64))
2200 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002201 if (test_kvm_facility(vcpu->kvm, 133))
2202 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002203 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2204 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2205 */
2206 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002207 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002208 else
2209 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002210
2211 if (kvm_is_ucontrol(vcpu->kvm))
2212 return __kvm_ucontrol_vcpu_init(vcpu);
2213
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002214 return 0;
2215}
2216
David Hildenbranddb0758b2016-02-15 09:42:25 +01002217/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2218static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2219{
2220 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002221 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002222 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002223 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002224}
2225
2226/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2227static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2228{
2229 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002230 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002231 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2232 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002233 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002234}
2235
2236/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2237static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2238{
2239 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2240 vcpu->arch.cputm_enabled = true;
2241 __start_cpu_timer_accounting(vcpu);
2242}
2243
2244/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2245static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2246{
2247 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2248 __stop_cpu_timer_accounting(vcpu);
2249 vcpu->arch.cputm_enabled = false;
2250}
2251
2252static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2253{
2254 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2255 __enable_cpu_timer_accounting(vcpu);
2256 preempt_enable();
2257}
2258
2259static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2260{
2261 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2262 __disable_cpu_timer_accounting(vcpu);
2263 preempt_enable();
2264}
2265
David Hildenbrand4287f242016-02-15 09:40:12 +01002266/* set the cpu timer - may only be called from the VCPU thread itself */
2267void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2268{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002269 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002270 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002271 if (vcpu->arch.cputm_enabled)
2272 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002273 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002274 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002275 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002276}
2277
David Hildenbranddb0758b2016-02-15 09:42:25 +01002278/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002279__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2280{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002281 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002282 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002283
2284 if (unlikely(!vcpu->arch.cputm_enabled))
2285 return vcpu->arch.sie_block->cputm;
2286
David Hildenbrand9c23a132016-02-17 21:53:33 +01002287 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2288 do {
2289 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2290 /*
2291 * If the writer would ever execute a read in the critical
2292 * section, e.g. in irq context, we have a deadlock.
2293 */
2294 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2295 value = vcpu->arch.sie_block->cputm;
2296 /* if cputm_start is 0, accounting is being started/stopped */
2297 if (likely(vcpu->arch.cputm_start))
2298 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2299 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2300 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002301 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002302}
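/*
 * The cpu timer value can be read from other threads: readers retry on
 * cputm_seqcount while the owning vcpu updates it, and subtract the time
 * accounted since the last cputm_start when accounting is active. Writing
 * (kvm_s390_set_cpu_timer) is restricted to the vcpu thread itself, as noted
 * above.
 */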
2303
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002304void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2305{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002306
David Hildenbrand37d9df92015-03-11 16:47:33 +01002307 gmap_enable(vcpu->arch.enabled_gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002308 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002309 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002310 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002311 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002312}
2313
2314void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2315{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002316 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002317 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002318 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002319 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002320 vcpu->arch.enabled_gmap = gmap_get_enabled();
2321 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002322
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002323}
2324
2325static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2326{
2327 /* this equals initial cpu reset in pop, but we don't switch to ESA */
2328 vcpu->arch.sie_block->gpsw.mask = 0UL;
2329 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002330 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002331 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002332 vcpu->arch.sie_block->ckc = 0UL;
2333 vcpu->arch.sie_block->todpr = 0;
2334 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2335 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2336 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002337 /* make sure the new fpc will be lazily loaded */
2338 save_fpu_regs();
2339 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002340 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002341 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002342 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2343 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002344 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2345 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002346 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002347}
2348
Dominik Dingel31928aa2014-12-04 15:47:07 +01002349void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002350{
Jason J. Herne72f25022014-11-25 09:46:02 -05002351 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002352 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002353 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02002354 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002355 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002356 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002357 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002358 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002359 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002360 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2361 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002362 /* make vcpu_load load the right gmap on the first trigger */
2363 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002364}
2365
Tony Krowiak5102ee82014-06-27 14:46:01 -04002366static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2367{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002368 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002369 return;
2370
Tony Krowiaka374e892014-09-03 10:13:53 +02002371 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2372
2373 if (vcpu->kvm->arch.crypto.aes_kw)
2374 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2375 if (vcpu->kvm->arch.crypto.dea_kw)
2376 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2377
Tony Krowiak5102ee82014-06-27 14:46:01 -04002378 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2379}
2380
Dominik Dingelb31605c2014-03-25 13:47:11 +01002381void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2382{
2383 free_page(vcpu->arch.sie_block->cbrlo);
2384 vcpu->arch.sie_block->cbrlo = 0;
2385}
2386
2387int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2388{
2389 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2390 if (!vcpu->arch.sie_block->cbrlo)
2391 return -ENOMEM;
2392
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002393 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002394 return 0;
2395}
2396
Michael Mueller91520f12015-02-27 14:32:11 +01002397static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2398{
2399 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2400
Michael Mueller91520f12015-02-27 14:32:11 +01002401 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002402 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002403 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002404}
2405
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002406int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2407{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002408 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002409
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002410 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2411 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002412 CPUSTAT_STOPPED);
2413
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002414 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002415 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002416 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002417 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002418
Michael Mueller91520f12015-02-27 14:32:11 +01002419 kvm_s390_vcpu_setup_model(vcpu);
2420
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002421 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2422 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002423 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002424 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002425 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002426 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002427 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002428
David Hildenbrand873b4252016-04-04 15:53:47 +02002429 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002430 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002431 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002432 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2433 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002434 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002435 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002436 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002437 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002438 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002439 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002440 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002441 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002442 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002443 vcpu->arch.sie_block->eca |= ECA_VX;
2444 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002445 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002446 if (test_kvm_facility(vcpu->kvm, 139))
2447 vcpu->arch.sie_block->ecd |= ECD_MEF;
2448
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002449 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2450 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002451 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002452
2453 if (sclp.has_kss)
2454 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2455 else
2456 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002457
Dominik Dingele6db1d62015-05-07 15:41:57 +02002458 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002459 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2460 if (rc)
2461 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002462 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002463 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002464 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002465
Tony Krowiak5102ee82014-06-27 14:46:01 -04002466 kvm_s390_vcpu_crypto_setup(vcpu);
2467
Dominik Dingelb31605c2014-03-25 13:47:11 +01002468 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002469}
2470
2471struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2472 unsigned int id)
2473{
Carsten Otte4d475552011-10-18 12:27:12 +02002474 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002475 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002476 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002477
David Hildenbrand42158252015-10-12 12:57:22 +02002478 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002479 goto out;
2480
2481 rc = -ENOMEM;
2482
Michael Muellerb110fea2013-06-12 13:54:54 +02002483 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002484 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002485 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002486
QingFeng Haoda72ca42017-06-07 11:41:19 +02002487 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002488 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2489 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002490 goto out_free_cpu;
2491
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002492 vcpu->arch.sie_block = &sie_page->sie_block;
2493 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2494
David Hildenbrandefed1102015-04-16 12:32:41 +02002495 /* the real guest size will always be smaller than msl */
2496 vcpu->arch.sie_block->mso = 0;
2497 vcpu->arch.sie_block->msl = sclp.hamax;
2498
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002499 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002500 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002501 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002502 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002503 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002504 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002505
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002506 rc = kvm_vcpu_init(vcpu, kvm, id);
2507 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002508 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002509 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002510 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002511 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002512
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002513 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002514out_free_sie_block:
2515 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002517 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002518out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002519 return ERR_PTR(rc);
2520}
2521
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002522int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2523{
David Hildenbrand9a022062014-08-05 17:40:47 +02002524 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002525}
2526
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002527bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2528{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002529 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002530}
2531
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002532void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002533{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002534 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002535 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002536}
2537
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002538void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002539{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002540 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002541}
2542
Christian Borntraeger8e236542015-04-09 13:49:04 +02002543static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2544{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002545 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002546 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002547}
2548
2549static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2550{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002551 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002552}
2553
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002554/*
2555 * Kick a guest cpu out of SIE and wait until SIE is not running.
2556 * If the CPU is not running in SIE (e.g. sleeping while idle), the function
2557 * returns immediately. */
2558void exit_sie(struct kvm_vcpu *vcpu)
2559{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002560 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002561 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2562 cpu_relax();
2563}
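/*
 * Illustrative note on the protocol used by the callers above: a PROG_* bit
 * (PROG_BLOCK_SIE or PROG_REQUEST) is set in prog20 first, then exit_sie()
 * raises CPUSTAT_STOP_INT to force the vCPU out of interpretive execution
 * and spins on the PROG_IN_SIE bit in prog0c until the vCPU has really left
 * SIE, so the new state is guaranteed to be seen before SIE is re-entered.
 */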
2564
Christian Borntraeger8e236542015-04-09 13:49:04 +02002565/* Kick a guest cpu out of SIE to process a request synchronously */
2566void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002567{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002568 kvm_make_request(req, vcpu);
2569 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002570}
2571
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002572static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2573 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002574{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002575 struct kvm *kvm = gmap->private;
2576 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002577 unsigned long prefix;
2578 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002579
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002580 if (gmap_is_shadow(gmap))
2581 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002582 if (start >= 1UL << 31)
2583 /* We are only interested in prefix pages */
2584 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002585 kvm_for_each_vcpu(i, vcpu, kvm) {
2586 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002587 prefix = kvm_s390_get_prefix(vcpu);
2588 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2589 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2590 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002591 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002592 }
2593 }
2594}
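/*
 * Illustrative note: the notifier above only cares about invalidations below
 * 2 GB that overlap a vCPU's prefix area (two consecutive pages starting at
 * the prefix). For each match it queues KVM_REQ_MMU_RELOAD, which
 * kvm_s390_handle_requests() later resolves by re-arming the notifier via
 * gmap_mprotect_notify() on the prefix pages before re-entering SIE.
 */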
2595
Christoffer Dallb6d33832012-03-08 16:44:24 -05002596int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2597{
2598 /* kvm common code refers to this, but never calls it */
2599 BUG();
2600 return 0;
2601}
2602
Carsten Otte14eebd92012-05-15 14:15:26 +02002603static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2604 struct kvm_one_reg *reg)
2605{
2606 int r = -EINVAL;
2607
2608 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002609 case KVM_REG_S390_TODPR:
2610 r = put_user(vcpu->arch.sie_block->todpr,
2611 (u32 __user *)reg->addr);
2612 break;
2613 case KVM_REG_S390_EPOCHDIFF:
2614 r = put_user(vcpu->arch.sie_block->epoch,
2615 (u64 __user *)reg->addr);
2616 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002617 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002618 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002619 (u64 __user *)reg->addr);
2620 break;
2621 case KVM_REG_S390_CLOCK_COMP:
2622 r = put_user(vcpu->arch.sie_block->ckc,
2623 (u64 __user *)reg->addr);
2624 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002625 case KVM_REG_S390_PFTOKEN:
2626 r = put_user(vcpu->arch.pfault_token,
2627 (u64 __user *)reg->addr);
2628 break;
2629 case KVM_REG_S390_PFCOMPARE:
2630 r = put_user(vcpu->arch.pfault_compare,
2631 (u64 __user *)reg->addr);
2632 break;
2633 case KVM_REG_S390_PFSELECT:
2634 r = put_user(vcpu->arch.pfault_select,
2635 (u64 __user *)reg->addr);
2636 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002637 case KVM_REG_S390_PP:
2638 r = put_user(vcpu->arch.sie_block->pp,
2639 (u64 __user *)reg->addr);
2640 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002641 case KVM_REG_S390_GBEA:
2642 r = put_user(vcpu->arch.sie_block->gbea,
2643 (u64 __user *)reg->addr);
2644 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002645 default:
2646 break;
2647 }
2648
2649 return r;
2650}
2651
2652static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2653 struct kvm_one_reg *reg)
2654{
2655 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002656 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002657
2658 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002659 case KVM_REG_S390_TODPR:
2660 r = get_user(vcpu->arch.sie_block->todpr,
2661 (u32 __user *)reg->addr);
2662 break;
2663 case KVM_REG_S390_EPOCHDIFF:
2664 r = get_user(vcpu->arch.sie_block->epoch,
2665 (u64 __user *)reg->addr);
2666 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002667 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002668 r = get_user(val, (u64 __user *)reg->addr);
2669 if (!r)
2670 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002671 break;
2672 case KVM_REG_S390_CLOCK_COMP:
2673 r = get_user(vcpu->arch.sie_block->ckc,
2674 (u64 __user *)reg->addr);
2675 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002676 case KVM_REG_S390_PFTOKEN:
2677 r = get_user(vcpu->arch.pfault_token,
2678 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002679 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2680 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002681 break;
2682 case KVM_REG_S390_PFCOMPARE:
2683 r = get_user(vcpu->arch.pfault_compare,
2684 (u64 __user *)reg->addr);
2685 break;
2686 case KVM_REG_S390_PFSELECT:
2687 r = get_user(vcpu->arch.pfault_select,
2688 (u64 __user *)reg->addr);
2689 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002690 case KVM_REG_S390_PP:
2691 r = get_user(vcpu->arch.sie_block->pp,
2692 (u64 __user *)reg->addr);
2693 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002694 case KVM_REG_S390_GBEA:
2695 r = get_user(vcpu->arch.sie_block->gbea,
2696 (u64 __user *)reg->addr);
2697 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002698 default:
2699 break;
2700 }
2701
2702 return r;
2703}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002704
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002705static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2706{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002707 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002708 return 0;
2709}
2710
2711int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2712{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002713 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002714 return 0;
2715}
2716
2717int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2718{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002719 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002720 return 0;
2721}
2722
2723int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2724 struct kvm_sregs *sregs)
2725{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002726 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002727 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002728 return 0;
2729}
2730
2731int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2732 struct kvm_sregs *sregs)
2733{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002734 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002735 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002736 return 0;
2737}
2738
2739int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2740{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002741 if (test_fp_ctl(fpu->fpc))
2742 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002743 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002744 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002745 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2746 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002747 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002748 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002749 return 0;
2750}
2751
2752int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2753{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002754 /* make sure we have the latest values */
2755 save_fpu_regs();
2756 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002757 convert_vx_to_fp((freg_t *) fpu->fprs,
2758 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002759 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002760 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002761 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002762 return 0;
2763}
2764
2765static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2766{
2767 int rc = 0;
2768
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002769 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002770 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002771 else {
2772 vcpu->run->psw_mask = psw.mask;
2773 vcpu->run->psw_addr = psw.addr;
2774 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002775 return rc;
2776}
2777
2778int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2779 struct kvm_translation *tr)
2780{
2781 return -EINVAL; /* not implemented yet */
2782}
2783
David Hildenbrand27291e22014-01-23 12:26:52 +01002784#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2785 KVM_GUESTDBG_USE_HW_BP | \
2786 KVM_GUESTDBG_ENABLE)
2787
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002788int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2789 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002790{
David Hildenbrand27291e22014-01-23 12:26:52 +01002791 int rc = 0;
2792
2793 vcpu->guest_debug = 0;
2794 kvm_s390_clear_bp_data(vcpu);
2795
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002796 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002797 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002798 if (!sclp.has_gpere)
2799 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002800
2801 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2802 vcpu->guest_debug = dbg->control;
2803 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002804 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002805
2806 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2807 rc = kvm_s390_import_bp_data(vcpu, dbg);
2808 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002809 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002810 vcpu->arch.guestdbg.last_bp = 0;
2811 }
2812
2813 if (rc) {
2814 vcpu->guest_debug = 0;
2815 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002816 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002817 }
2818
2819 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002820}
2821
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002822int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2823 struct kvm_mp_state *mp_state)
2824{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002825 /* CHECK_STOP and LOAD are not supported yet */
2826 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2827 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002828}
2829
2830int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2831 struct kvm_mp_state *mp_state)
2832{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002833 int rc = 0;
2834
2835 /* user space knows about this interface - let it control the state */
2836 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2837
2838 switch (mp_state->mp_state) {
2839 case KVM_MP_STATE_STOPPED:
2840 kvm_s390_vcpu_stop(vcpu);
2841 break;
2842 case KVM_MP_STATE_OPERATING:
2843 kvm_s390_vcpu_start(vcpu);
2844 break;
2845 case KVM_MP_STATE_LOAD:
2846 case KVM_MP_STATE_CHECK_STOP:
2847 /* fall through - CHECK_STOP and LOAD are not supported yet */
2848 default:
2849 rc = -ENXIO;
2850 }
2851
2852 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002853}
2854
David Hildenbrand8ad35752014-03-14 11:00:21 +01002855static bool ibs_enabled(struct kvm_vcpu *vcpu)
2856{
2857 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2858}
2859
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002860static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2861{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002862retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002863 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02002864 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002865 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002866 /*
2867 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002868 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002869 * This ensures that the ipte instruction for this request has
2870 * already finished. We might race against a second unmapper that
2871	 * wants to set the blocking bit. Let's just retry the request loop.
2872 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002873 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002874 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002875 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2876 kvm_s390_get_prefix(vcpu),
2877 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002878 if (rc) {
2879 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002880 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002881 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002882 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002883 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002884
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002885 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2886 vcpu->arch.sie_block->ihcpu = 0xffff;
2887 goto retry;
2888 }
2889
David Hildenbrand8ad35752014-03-14 11:00:21 +01002890 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2891 if (!ibs_enabled(vcpu)) {
2892 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002893 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002894 &vcpu->arch.sie_block->cpuflags);
2895 }
2896 goto retry;
2897 }
2898
2899 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2900 if (ibs_enabled(vcpu)) {
2901 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002902 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002903 &vcpu->arch.sie_block->cpuflags);
2904 }
2905 goto retry;
2906 }
2907
David Hildenbrand6502a342016-06-21 14:19:51 +02002908 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2909 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2910 goto retry;
2911 }
2912
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002913 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2914 /*
2915 * Disable CMMA virtualization; we will emulate the ESSA
2916 * instruction manually, in order to provide additional
2917 * functionalities needed for live migration.
2918 */
2919 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2920 goto retry;
2921 }
2922
2923 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2924 /*
2925 * Re-enable CMMA virtualization if CMMA is available and
2926 * was used.
2927 */
2928 if ((vcpu->kvm->arch.use_cmma) &&
2929 (vcpu->kvm->mm->context.use_cmma))
2930 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2931 goto retry;
2932 }
2933
David Hildenbrand0759d062014-05-13 16:54:32 +02002934 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002935 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002936
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002937 return 0;
2938}
2939
Collin L. Walling8fa16962016-07-26 15:29:44 -04002940void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
2941 const struct kvm_s390_vm_tod_clock *gtod)
2942{
2943 struct kvm_vcpu *vcpu;
2944 struct kvm_s390_tod_clock_ext htod;
2945 int i;
2946
2947 mutex_lock(&kvm->lock);
2948 preempt_disable();
2949
2950 get_tod_clock_ext((char *)&htod);
2951
2952 kvm->arch.epoch = gtod->tod - htod.tod;
2953 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
2954
2955 if (kvm->arch.epoch > gtod->tod)
2956 kvm->arch.epdx -= 1;
2957
2958 kvm_s390_vcpu_block_all(kvm);
2959 kvm_for_each_vcpu(i, vcpu, kvm) {
2960 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2961 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
2962 }
2963
2964 kvm_s390_vcpu_unblock_all(kvm);
2965 preempt_enable();
2966 mutex_unlock(&kvm->lock);
2967}
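/*
 * Worked example for the epoch calculation above: the guest epoch is the
 * 128-bit difference between the requested guest TOD and the host TOD, kept
 * as an (epoch_idx, tod) pair:
 *
 *	epoch = gtod->tod - htod.tod;             (low 64 bits, may wrap)
 *	epdx  = gtod->epoch_idx - htod.epoch_idx; (high bits)
 *
 * If the low-word subtraction wrapped (epoch > gtod->tod), one is borrowed
 * from the epoch index. E.g. gtod->tod = 0x10 and htod.tod = 0x20 yields
 * epoch = 2^64 - 0x10, which is larger than gtod->tod, so epdx is
 * decremented by one to carry the borrow.
 */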
2968
David Hildenbrand25ed1672015-05-12 09:49:14 +02002969void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2970{
2971 struct kvm_vcpu *vcpu;
2972 int i;
2973
2974 mutex_lock(&kvm->lock);
2975 preempt_disable();
2976 kvm->arch.epoch = tod - get_tod_clock();
2977 kvm_s390_vcpu_block_all(kvm);
2978 kvm_for_each_vcpu(i, vcpu, kvm)
2979 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2980 kvm_s390_vcpu_unblock_all(kvm);
2981 preempt_enable();
2982 mutex_unlock(&kvm->lock);
2983}
2984
Thomas Huthfa576c52014-05-06 17:20:16 +02002985/**
2986 * kvm_arch_fault_in_page - fault-in guest page if necessary
2987 * @vcpu: The corresponding virtual cpu
2988 * @gpa: Guest physical address
2989 * @writable: Whether the page should be writable or not
2990 *
2991 * Make sure that a guest page has been faulted-in on the host.
2992 *
2993 * Return: Zero on success, negative error code otherwise.
2994 */
2995long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002996{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002997 return gmap_fault(vcpu->arch.gmap, gpa,
2998 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002999}
3000
Dominik Dingel3c038e62013-10-07 17:11:48 +02003001static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3002 unsigned long token)
3003{
3004 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003005 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003006
3007 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003008 irq.u.ext.ext_params2 = token;
3009 irq.type = KVM_S390_INT_PFAULT_INIT;
3010 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003011 } else {
3012 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003013 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003014 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3015 }
3016}
3017
3018void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3019 struct kvm_async_pf *work)
3020{
3021 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3022 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3023}
3024
3025void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3026 struct kvm_async_pf *work)
3027{
3028 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3029 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3030}
3031
3032void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3033 struct kvm_async_pf *work)
3034{
3035 /* s390 will always inject the page directly */
3036}
3037
3038bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3039{
3040 /*
3041 * s390 will always inject the page directly,
3042	 * but we still want check_async_completion to clean up
3043 */
3044 return true;
3045}
3046
3047static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3048{
3049 hva_t hva;
3050 struct kvm_arch_async_pf arch;
3051 int rc;
3052
3053 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3054 return 0;
3055 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3056 vcpu->arch.pfault_compare)
3057 return 0;
3058 if (psw_extint_disabled(vcpu))
3059 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003060 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003061 return 0;
3062 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
3063 return 0;
3064 if (!vcpu->arch.gmap->pfault_enabled)
3065 return 0;
3066
Heiko Carstens81480cc2014-01-01 16:36:07 +01003067 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3068 hva += current->thread.gmap_addr & ~PAGE_MASK;
3069 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003070 return 0;
3071
3072 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3073 return rc;
3074}
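/*
 * Illustrative summary of the checks above: an asynchronous pfault is only
 * set up when the guest has opted in and can take the notification right
 * now - the pfault token is valid, the PSW bits selected by pfault_select
 * match pfault_compare, external interrupts are not disabled, no interrupt
 * is already pending, the enable bit (0x200) is set in CR0 and pfault is
 * enabled on the gmap. Otherwise vcpu_post_run() falls back to the
 * synchronous kvm_arch_fault_in_page() path.
 */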
3075
Thomas Huth3fb4c402013-09-12 10:33:43 +02003076static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003077{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003078 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003079
Dominik Dingel3c038e62013-10-07 17:11:48 +02003080 /*
3081 * On s390 notifications for arriving pages will be delivered directly
3082	 * to the guest, but the housekeeping for completed pfaults is
3083 * handled outside the worker.
3084 */
3085 kvm_check_async_pf_completion(vcpu);
3086
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003087 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3088 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003089
3090 if (need_resched())
3091 schedule();
3092
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003093 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003094 s390_handle_mcck();
3095
Jens Freimann79395032014-04-17 10:10:30 +02003096 if (!kvm_is_ucontrol(vcpu->kvm)) {
3097 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3098 if (rc)
3099 return rc;
3100 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003101
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003102 rc = kvm_s390_handle_requests(vcpu);
3103 if (rc)
3104 return rc;
3105
David Hildenbrand27291e22014-01-23 12:26:52 +01003106 if (guestdbg_enabled(vcpu)) {
3107 kvm_s390_backup_guest_per_regs(vcpu);
3108 kvm_s390_patch_guest_per_regs(vcpu);
3109 }
3110
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003111 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003112 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3113 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3114 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003115
Thomas Huth3fb4c402013-09-12 10:33:43 +02003116 return 0;
3117}
3118
Thomas Huth492d8642015-02-10 16:11:01 +01003119static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3120{
David Hildenbrand56317922016-01-12 17:37:58 +01003121 struct kvm_s390_pgm_info pgm_info = {
3122 .code = PGM_ADDRESSING,
3123 };
3124 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003125 int rc;
3126
3127 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3128 trace_kvm_s390_sie_fault(vcpu);
3129
3130 /*
3131 * We want to inject an addressing exception, which is defined as a
3132 * suppressing or terminating exception. However, since we came here
3133 * by a DAT access exception, the PSW still points to the faulting
3134 * instruction since DAT exceptions are nullifying. So we've got
3135 * to look up the current opcode to get the length of the instruction
3136 * to be able to forward the PSW.
3137 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003138 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003139 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003140 if (rc < 0) {
3141 return rc;
3142 } else if (rc) {
3143 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3144 * Forward by arbitrary ilc, injection will take care of
3145 * nullification if necessary.
3146 */
3147 pgm_info = vcpu->arch.pgm;
3148 ilen = 4;
3149 }
David Hildenbrand56317922016-01-12 17:37:58 +01003150 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3151 kvm_s390_forward_psw(vcpu, ilen);
3152 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003153}
3154
Thomas Huth3fb4c402013-09-12 10:33:43 +02003155static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3156{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003157 struct mcck_volatile_info *mcck_info;
3158 struct sie_page *sie_page;
3159
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003160 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3161 vcpu->arch.sie_block->icptcode);
3162 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3163
David Hildenbrand27291e22014-01-23 12:26:52 +01003164 if (guestdbg_enabled(vcpu))
3165 kvm_s390_restore_guest_per_regs(vcpu);
3166
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003167 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3168 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003169
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003170 if (exit_reason == -EINTR) {
3171 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3172 sie_page = container_of(vcpu->arch.sie_block,
3173 struct sie_page, sie_block);
3174 mcck_info = &sie_page->mcck_info;
3175 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3176 return 0;
3177 }
3178
David Hildenbrand71f116b2015-10-19 16:24:28 +02003179 if (vcpu->arch.sie_block->icptcode > 0) {
3180 int rc = kvm_handle_sie_intercept(vcpu);
3181
3182 if (rc != -EOPNOTSUPP)
3183 return rc;
3184 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3185 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3186 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3187 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3188 return -EREMOTE;
3189 } else if (exit_reason != -EFAULT) {
3190 vcpu->stat.exit_null++;
3191 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003192 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3193 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3194 vcpu->run->s390_ucontrol.trans_exc_code =
3195 current->thread.gmap_addr;
3196 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003197 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003198 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003199 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003200 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003201 if (kvm_arch_setup_async_pf(vcpu))
3202 return 0;
3203 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003204 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003205 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003206}
3207
3208static int __vcpu_run(struct kvm_vcpu *vcpu)
3209{
3210 int rc, exit_reason;
3211
Thomas Huth800c1062013-09-12 10:33:45 +02003212 /*
3213 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3214 * ning the guest), so that memslots (and other stuff) are protected
3215 */
3216 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3217
Thomas Hutha76ccff2013-09-12 10:33:44 +02003218 do {
3219 rc = vcpu_pre_run(vcpu);
3220 if (rc)
3221 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003222
Thomas Huth800c1062013-09-12 10:33:45 +02003223 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003224 /*
3225		 * As PF_VCPU will be used in the fault handler, there must be
3226		 * no uaccess between guest_enter and guest_exit.
3227 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003228 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003229 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003230 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003231 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003232 exit_reason = sie64a(vcpu->arch.sie_block,
3233 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003234 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003235 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003236 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003237 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003238 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003239
Thomas Hutha76ccff2013-09-12 10:33:44 +02003240 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003241 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003242
Thomas Huth800c1062013-09-12 10:33:45 +02003243 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003244 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003245}
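/*
 * Illustrative note: the run loop above drops kvm->srcu only around the
 * actual sie64a() call and disables interrupts just for the
 * guest_enter_irqoff()/guest_exit_irqoff() transitions and the cpu timer
 * accounting switch. It keeps iterating until a signal is pending, a
 * guestdbg exit is pending, or vcpu_pre_run()/vcpu_post_run() returns a
 * non-zero rc.
 */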
3246
David Hildenbrandb028ee32014-07-17 10:47:43 +02003247static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3248{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003249 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003250 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003251
3252 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003253 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003254 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3255 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3256 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3257 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3258 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3259 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003260 /* some control register changes require a tlb flush */
3261 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003262 }
3263 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003264 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003265 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3266 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3267 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3268 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3269 }
3270 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3271 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3272 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3273 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003274 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3275 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003276 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003277 /*
3278 * If userspace sets the riccb (e.g. after migration) to a valid state,
3279 * we should enable RI here instead of doing the lazy enablement.
3280 */
3281 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003282 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003283 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003284 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003285 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003286 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003287 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003288 /*
3289 * If userspace sets the gscb (e.g. after migration) to non-zero,
3290 * we should enable GS here instead of doing the lazy enablement.
3291 */
3292 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3293 test_kvm_facility(vcpu->kvm, 133) &&
3294 gscb->gssm &&
3295 !vcpu->arch.gs_enabled) {
3296 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3297 vcpu->arch.sie_block->ecb |= ECB_GS;
3298 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3299 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003300 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003301 save_access_regs(vcpu->arch.host_acrs);
3302 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003303 /* save host (userspace) fprs/vrs */
3304 save_fpu_regs();
3305 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3306 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3307 if (MACHINE_HAS_VX)
3308 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3309 else
3310 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3311 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3312 if (test_fp_ctl(current->thread.fpu.fpc))
3313 /* User space provided an invalid FPC, let's clear it */
3314 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003315 if (MACHINE_HAS_GS) {
3316 preempt_disable();
3317 __ctl_set_bit(2, 4);
3318 if (current->thread.gs_cb) {
3319 vcpu->arch.host_gscb = current->thread.gs_cb;
3320 save_gs_cb(vcpu->arch.host_gscb);
3321 }
3322 if (vcpu->arch.gs_enabled) {
3323 current->thread.gs_cb = (struct gs_cb *)
3324 &vcpu->run->s.regs.gscb;
3325 restore_gs_cb(current->thread.gs_cb);
3326 }
3327 preempt_enable();
3328 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003329
David Hildenbrandb028ee32014-07-17 10:47:43 +02003330 kvm_run->kvm_dirty_regs = 0;
3331}
3332
3333static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3334{
3335 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3336 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3337 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3338 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003339 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003340 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3341 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3342 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3343 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3344 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3345 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3346 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003347 save_access_regs(vcpu->run->s.regs.acrs);
3348 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003349 /* Save guest register state */
3350 save_fpu_regs();
3351 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3352 /* Restore will be done lazily at return */
3353 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3354 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003355 if (MACHINE_HAS_GS) {
3356 __ctl_set_bit(2, 4);
3357 if (vcpu->arch.gs_enabled)
3358 save_gs_cb(current->thread.gs_cb);
3359 preempt_disable();
3360 current->thread.gs_cb = vcpu->arch.host_gscb;
3361 restore_gs_cb(vcpu->arch.host_gscb);
3362 preempt_enable();
3363 if (!vcpu->arch.host_gscb)
3364 __ctl_clear_bit(2, 4);
3365 vcpu->arch.host_gscb = NULL;
3366 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003367
David Hildenbrandb028ee32014-07-17 10:47:43 +02003368}
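/*
 * Illustrative note: sync_regs() and store_regs() are the two halves of the
 * lazy register switch around __vcpu_run(). On entry the guest view from
 * kvm_run is loaded and the host access/FP/vector/GS state is stashed in
 * vcpu->arch.host_*; on exit the guest state is written back to kvm_run and
 * the host state is put back (the actual FP/vector restore is left to the
 * lazy mechanism on return to user space).
 */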
3369
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003370int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3371{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003372 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003373 sigset_t sigsaved;
3374
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003375 if (kvm_run->immediate_exit)
3376 return -EINTR;
3377
David Hildenbrand27291e22014-01-23 12:26:52 +01003378 if (guestdbg_exit_pending(vcpu)) {
3379 kvm_s390_prepare_debug_exit(vcpu);
3380 return 0;
3381 }
3382
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003383 if (vcpu->sigset_active)
3384 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3385
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003386 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3387 kvm_s390_vcpu_start(vcpu);
3388 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003389 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003390 vcpu->vcpu_id);
3391 return -EINVAL;
3392 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003393
David Hildenbrandb028ee32014-07-17 10:47:43 +02003394 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003395 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003396
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003397 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003398 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003399
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003400 if (signal_pending(current) && !rc) {
3401 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003402 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003403 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003404
David Hildenbrand27291e22014-01-23 12:26:52 +01003405 if (guestdbg_exit_pending(vcpu) && !rc) {
3406 kvm_s390_prepare_debug_exit(vcpu);
3407 rc = 0;
3408 }
3409
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003410 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003411 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003412 rc = 0;
3413 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003414
David Hildenbranddb0758b2016-02-15 09:42:25 +01003415 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003416 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003417
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003418 if (vcpu->sigset_active)
3419 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3420
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003421 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003422 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003423}
3424
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003425/*
3426 * store status at address
3427 * we have two special cases:
3428 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3429 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3430 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003431int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003432{
Carsten Otte092670c2011-07-24 10:48:22 +02003433 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003434 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003435 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003436 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003437 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003438
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003439 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003440 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3441 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003442 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003443 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003444 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3445 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003446 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003447 gpa = px;
3448 } else
3449 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003450
3451 /* manually convert vector registers if necessary */
3452 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003453 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003454 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3455 fprs, 128);
3456 } else {
3457 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003458 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003459 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003460 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003461 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003462 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003463 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003464 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003465 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003466 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003467 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003468 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003469 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003470 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003471 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003472 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003473 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003474 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003475 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003476 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003477 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003478 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003479 &vcpu->arch.sie_block->gcr, 128);
3480 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003481}
3482
Thomas Huthe8798922013-11-06 15:46:33 +01003483int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3484{
3485 /*
3486 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003487 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003488	 * them into the save area.
3489 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003490 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003491 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003492 save_access_regs(vcpu->run->s.regs.acrs);
3493
3494 return kvm_s390_store_status_unloaded(vcpu, addr);
3495}
3496
David Hildenbrand8ad35752014-03-14 11:00:21 +01003497static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3498{
3499 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003500 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003501}
3502
3503static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3504{
3505 unsigned int i;
3506 struct kvm_vcpu *vcpu;
3507
3508 kvm_for_each_vcpu(i, vcpu, kvm) {
3509 __disable_ibs_on_vcpu(vcpu);
3510 }
3511}
3512
3513static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3514{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003515 if (!sclp.has_ibs)
3516 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003517 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003518 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003519}
3520
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003521void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3522{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003523 int i, online_vcpus, started_vcpus = 0;
3524
3525 if (!is_vcpu_stopped(vcpu))
3526 return;
3527
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003528 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003529 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003530 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003531 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3532
3533 for (i = 0; i < online_vcpus; i++) {
3534 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3535 started_vcpus++;
3536 }
3537
3538 if (started_vcpus == 0) {
3539 /* we're the only active VCPU -> speed it up */
3540 __enable_ibs_on_vcpu(vcpu);
3541 } else if (started_vcpus == 1) {
3542 /*
3543 * As we are starting a second VCPU, we have to disable
3544 * the IBS facility on all VCPUs to remove potentially
3545		 * outstanding ENABLE requests.
3546 */
3547 __disable_ibs_on_all_vcpus(vcpu->kvm);
3548 }
3549
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003550 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003551 /*
3552 * Another VCPU might have used IBS while we were offline.
3553 * Let's play safe and flush the VCPU at startup.
3554 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003555 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003556 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003557 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003558}
3559
3560void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3561{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003562 int i, online_vcpus, started_vcpus = 0;
3563 struct kvm_vcpu *started_vcpu = NULL;
3564
3565 if (is_vcpu_stopped(vcpu))
3566 return;
3567
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003568 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003569 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003570 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003571 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3572
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003573 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003574 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003575
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003576 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003577 __disable_ibs_on_vcpu(vcpu);
3578
3579 for (i = 0; i < online_vcpus; i++) {
3580 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3581 started_vcpus++;
3582 started_vcpu = vcpu->kvm->vcpus[i];
3583 }
3584 }
3585
3586 if (started_vcpus == 1) {
3587 /*
3588 * As we only have one VCPU left, we want to enable the
3589 * IBS facility for that VCPU to speed it up.
3590 */
3591 __enable_ibs_on_vcpu(started_vcpu);
3592 }
3593
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003594 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003595 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003596}
3597
Cornelia Huckd6712df2012-12-20 15:32:11 +01003598static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3599 struct kvm_enable_cap *cap)
3600{
3601 int r;
3602
3603 if (cap->flags)
3604 return -EINVAL;
3605
3606 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003607 case KVM_CAP_S390_CSS_SUPPORT:
3608 if (!vcpu->kvm->arch.css_support) {
3609 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003610 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003611 trace_kvm_s390_enable_css(vcpu->kvm);
3612 }
3613 r = 0;
3614 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003615 default:
3616 r = -EINVAL;
3617 break;
3618 }
3619 return r;
3620}
3621
Thomas Huth41408c282015-02-06 15:01:21 +01003622static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3623 struct kvm_s390_mem_op *mop)
3624{
3625 void __user *uaddr = (void __user *)mop->buf;
3626 void *tmpbuf = NULL;
3627 int r, srcu_idx;
3628 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3629 | KVM_S390_MEMOP_F_CHECK_ONLY;
3630
3631 if (mop->flags & ~supported_flags)
3632 return -EINVAL;
3633
3634 if (mop->size > MEM_OP_MAX_SIZE)
3635 return -E2BIG;
3636
3637 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3638 tmpbuf = vmalloc(mop->size);
3639 if (!tmpbuf)
3640 return -ENOMEM;
3641 }
3642
3643 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3644
3645 switch (mop->op) {
3646 case KVM_S390_MEMOP_LOGICAL_READ:
3647 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003648 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3649 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003650 break;
3651 }
3652 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3653 if (r == 0) {
3654 if (copy_to_user(uaddr, tmpbuf, mop->size))
3655 r = -EFAULT;
3656 }
3657 break;
3658 case KVM_S390_MEMOP_LOGICAL_WRITE:
3659 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003660 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3661 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003662 break;
3663 }
3664 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3665 r = -EFAULT;
3666 break;
3667 }
3668 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3669 break;
3670 default:
3671 r = -EINVAL;
3672 }
3673
3674 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3675
3676 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3677 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3678
3679 vfree(tmpbuf);
3680 return r;
3681}
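/*
 * Illustrative userspace usage of the handler above (hypothetical values;
 * only the fields this function reads are shown - gaddr, ar, size, buf, op
 * and flags), e.g. to read 512 bytes of guest logical memory:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.ar    = 0,
 *		.size  = 512,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.flags = 0,
 *	};
 *	ret = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY in flags only the address range is
 * checked and no data is copied.
 */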
3682
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003683long kvm_arch_vcpu_ioctl(struct file *filp,
3684 unsigned int ioctl, unsigned long arg)
3685{
3686 struct kvm_vcpu *vcpu = filp->private_data;
3687 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003688 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003689 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003690
Avi Kivity93736622010-05-13 12:35:17 +03003691 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003692 case KVM_S390_IRQ: {
3693 struct kvm_s390_irq s390irq;
3694
3695 r = -EFAULT;
3696 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3697 break;
3698 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3699 break;
3700 }
Avi Kivity93736622010-05-13 12:35:17 +03003701 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003702 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003703 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003704
Avi Kivity93736622010-05-13 12:35:17 +03003705 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003706 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003707 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003708 if (s390int_to_s390irq(&s390int, &s390irq))
3709 return -EINVAL;
3710 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003711 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003712 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003713 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003714 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003715 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003716 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003717 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003718 case KVM_S390_SET_INITIAL_PSW: {
3719 psw_t psw;
3720
Avi Kivitybc923cc2010-05-13 12:21:46 +03003721 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003722 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003723 break;
3724 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3725 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003726 }
3727 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003728 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3729 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003730 case KVM_SET_ONE_REG:
3731 case KVM_GET_ONE_REG: {
3732 struct kvm_one_reg reg;
3733 r = -EFAULT;
3734 if (copy_from_user(&reg, argp, sizeof(reg)))
3735 break;
3736 if (ioctl == KVM_SET_ONE_REG)
3737 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3738 else
3739 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3740 break;
3741 }
Carsten Otte27e03932012-01-04 10:25:21 +01003742#ifdef CONFIG_KVM_S390_UCONTROL
3743 case KVM_S390_UCAS_MAP: {
3744 struct kvm_s390_ucas_mapping ucasmap;
3745
3746 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3747 r = -EFAULT;
3748 break;
3749 }
3750
3751 if (!kvm_is_ucontrol(vcpu->kvm)) {
3752 r = -EINVAL;
3753 break;
3754 }
3755
3756 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3757 ucasmap.vcpu_addr, ucasmap.length);
3758 break;
3759 }
3760 case KVM_S390_UCAS_UNMAP: {
3761 struct kvm_s390_ucas_mapping ucasmap;
3762
3763 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3764 r = -EFAULT;
3765 break;
3766 }
3767
3768 if (!kvm_is_ucontrol(vcpu->kvm)) {
3769 r = -EINVAL;
3770 break;
3771 }
3772
3773 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3774 ucasmap.length);
3775 break;
3776 }
3777#endif
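	/*
	 * Resolve a fault on the given guest address by faulting in the
	 * backing page through the vcpu's gmap.
	 */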
Carsten Otteccc79102012-01-04 10:25:26 +01003778 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003779 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003780 break;
3781 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003782 case KVM_ENABLE_CAP:
3783 {
3784 struct kvm_enable_cap cap;
3785 r = -EFAULT;
3786 if (copy_from_user(&cap, argp, sizeof(cap)))
3787 break;
3788 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3789 break;
3790 }
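	/*
	 * KVM_S390_MEM_OP reads or writes guest memory through the vcpu's
	 * address space. Illustrative userspace sketch (not part of this
	 * file; field names from <linux/kvm.h>, vcpu_fd/guest_addr/buf are
	 * placeholders), reading guest memory into a local buffer:
	 *
	 *	struct kvm_s390_mem_op op = {
	 *		.gaddr	= guest_addr,
	 *		.size	= sizeof(buf),
	 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
	 *		.buf	= (__u64)(unsigned long)buf,
	 *	};
	 *
	 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	 */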
Thomas Huth41408c282015-02-06 15:01:21 +01003791 case KVM_S390_MEM_OP: {
3792 struct kvm_s390_mem_op mem_op;
3793
3794 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3795 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3796 else
3797 r = -EFAULT;
3798 break;
3799 }
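	/*
	 * SET/GET_IRQ_STATE transfer the vcpu's pending local interrupts as
	 * a flat array of struct kvm_s390_irq, mainly so that userspace can
	 * save and restore them across migration.
	 */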
Jens Freimann816c7662014-11-24 17:13:46 +01003800 case KVM_S390_SET_IRQ_STATE: {
3801 struct kvm_s390_irq_state irq_state;
3802
3803 r = -EFAULT;
3804 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3805 break;
3806 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3807 irq_state.len == 0 ||
3808 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3809 r = -EINVAL;
3810 break;
3811 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003812		/* do not use irq_state.flags; it would break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003813 r = kvm_s390_set_irq_state(vcpu,
3814 (void __user *) irq_state.buf,
3815 irq_state.len);
3816 break;
3817 }
3818 case KVM_S390_GET_IRQ_STATE: {
3819 struct kvm_s390_irq_state irq_state;
3820
3821 r = -EFAULT;
3822 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3823 break;
3824 if (irq_state.len == 0) {
3825 r = -EINVAL;
3826 break;
3827 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003828		/* do not use irq_state.flags; it would break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003829 r = kvm_s390_get_irq_state(vcpu,
3830 (__u8 __user *) irq_state.buf,
3831 irq_state.len);
3832 break;
3833 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003834 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003835 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003836 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003837 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003838}
3839
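/*
 * Handle a page fault on a mapping of the vcpu file descriptor. Only user
 * controlled VMs expose anything here: the SIE control block at page offset
 * KVM_S390_SIE_PAGE_OFFSET. Everything else gets SIGBUS.
 */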
Carsten Otte5b1c1492012-01-04 10:25:23 +01003840int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3841{
3842#ifdef CONFIG_KVM_S390_UCONTROL
3843 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3844 && (kvm_is_ucontrol(vcpu->kvm))) {
3845 vmf->page = virt_to_page(vcpu->arch.sie_block);
3846 get_page(vmf->page);
3847 return 0;
3848 }
3849#endif
3850 return VM_FAULT_SIGBUS;
3851}
3852
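/* Nothing to allocate; s390 keeps no architecture specific memslot data. */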
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303853int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3854 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003855{
3856 return 0;
3857}
3858
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003859/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003860int kvm_arch_prepare_memory_region(struct kvm *kvm,
3861 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003862 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003863 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003864{
Nick Wangdd2887e2013-03-25 17:22:57 +01003865	/* A few sanity checks. Memory slots have to start and end on a segment
3866	   boundary (1 MB). The userspace memory backing a slot may be split
3867	   across any number of vmas, and it is fine to mmap() and munmap()
3868	   within this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003869
Carsten Otte598841c2011-07-24 10:48:21 +02003870 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003871 return -EINVAL;
3872
Carsten Otte598841c2011-07-24 10:48:21 +02003873 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003874 return -EINVAL;
3875
Dominik Dingela3a92c32014-12-01 17:24:42 +01003876 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3877 return -EINVAL;
3878
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003879 return 0;
3880}
3881
3882void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003883 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003884 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003885 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003886 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003887{
Carsten Ottef7850c92011-07-24 10:48:23 +02003888 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003889
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003890 /* If the basics of the memslot do not change, we do not want
3891 * to update the gmap. Every update causes several unnecessary
3892 * segment translation exceptions. This is usually handled just
3893 * fine by the normal fault handler + gmap, but it will also
3894 * cause faults on the prefix page of running guest CPUs.
3895 */
3896 if (old->userspace_addr == mem->userspace_addr &&
3897 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3898 old->npages * PAGE_SIZE == mem->memory_size)
3899 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003900
3901 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3902 mem->guest_phys_addr, mem->memory_size);
3903 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003904 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003905 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003906}
3907
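/*
 * Mask of facility bits in facility-list doubleword @i that are not managed
 * by the hypervisor, derived from the 2-bit SCLP hmfai field for that
 * doubleword.
 */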
Alexander Yarygin60a37702016-04-01 15:38:57 +03003908static inline unsigned long nonhyp_mask(int i)
3909{
3910 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3911
3912 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3913}
3914
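/* Called when a vcpu stops blocking; discard any stale wakeup indication. */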
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003915void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3916{
3917 vcpu->valid_wakeup = false;
3918}
3919
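/*
 * Module init: refuse to load when SIE is not available (no sief2 facility),
 * then add the host facilities that are not handled by the hypervisor to the
 * facility mask offered to guests and register with the KVM core.
 */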
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003920static int __init kvm_s390_init(void)
3921{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003922 int i;
3923
David Hildenbrand07197fd2015-01-30 16:01:38 +01003924 if (!sclp.has_sief2) {
3925 pr_info("SIE not available\n");
3926 return -ENODEV;
3927 }
3928
Alexander Yarygin60a37702016-04-01 15:38:57 +03003929 for (i = 0; i < 16; i++)
3930 kvm_s390_fac_list_mask[i] |=
3931 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3932
Michael Mueller9d8d5782015-02-02 15:42:51 +01003933 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003934}
3935
3936static void __exit kvm_s390_exit(void)
3937{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003938 kvm_exit();
3939}
3940
3941module_init(kvm_s390_init);
3942module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003943
3944/*
3945 * Enable autoloading of the kvm module.
3946 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3947 * since x86 takes a different approach.
3948 */
3949#include <linux/miscdevice.h>
3950MODULE_ALIAS_MISCDEV(KVM_MINOR);
3951MODULE_ALIAS("devname:kvm");