// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2017
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_pei", VCPU_STAT(exit_pei) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
        { "instruction_sie", VCPU_STAT(instruction_sie) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { "diagnose_258", VCPU_STAT(diagnose_258) },
        { "diagnose_308", VCPU_STAT(diagnose_308) },
        { "diagnose_500", VCPU_STAT(diagnose_500) },
        { NULL }
};

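/* 128-bit extended TOD clock value as stored by STORE CLOCK EXTENDED */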
struct kvm_s390_tod_clock_ext {
        __u8 epoch_idx;
        __u64 tod;
        __u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                              unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
                          void *v)
{
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;
        unsigned long long *delta = v;

        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        vcpu->arch.sie_block->epoch -= *delta;
                        if (vcpu->arch.cputm_enabled)
                                vcpu->arch.cputm_start += *delta;
                        if (vcpu->arch.vsie_block)
                                vcpu->arch.vsie_block->epoch -= *delta;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
        .notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_pte_notifier(&gmap_notifier);
        vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
        gmap_register_pte_notifier(&vsie_gmap_notifier);
        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                       &kvm_clock_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_pte_notifier(&gmap_notifier);
        gmap_unregister_pte_notifier(&vsie_gmap_notifier);
        atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
                                         &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
        set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

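/*
 * Query whether the PERFORM LOCKED OPERATION function code @nr is
 * installed; setting bit 0x100 selects the "test bit" form, so no
 * actual locked operation is performed.
 */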
static inline int plo_test_bit(unsigned char nr)
{
        register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
        int cc;

        asm volatile(
                /* Parameter registers are ignored for "test bit" */
                "       plo     0,0,0,0(0)\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (r0)
                : "cc");
        return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
        int i;

        for (i = 0; i < 256; ++i) {
                if (plo_test_bit(i))
                        kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
        }

        if (test_facility(28)) /* TOD-clock steering */
                ptff(kvm_s390_available_subfunc.ptff,
                     sizeof(kvm_s390_available_subfunc.ptff),
                     PTFF_QAF);

        if (test_facility(17)) { /* MSA */
                __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmac);
                __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmc);
                __cpacf_query(CPACF_KM, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.km);
                __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kimd);
                __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.klmd);
        }
        if (test_facility(76)) /* MSA3 */
                __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.pckmo);
        if (test_facility(77)) { /* MSA4 */
                __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmctr);
                __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmf);
                __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmo);
                __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
                __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.ppno);

        if (test_facility(146)) /* MSA8 */
                __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kma);

        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
        /*
         * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
         * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
         */
        if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
            !test_facility(3) || !nested)
                return;
        allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
        if (sclp.has_64bscao)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
        if (sclp.has_siif)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
        if (sclp.has_gpere)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
        if (sclp.has_gsls)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
        if (sclp.has_ib)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
        if (sclp.has_cei)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
        if (sclp.has_ibs)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
        if (sclp.has_kss)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
        /*
         * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
         * all skey handling functions read/set the skey from the PGSTE
         * instead of the real storage key.
         *
         * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
         * pages being detected as preserved although they are resident.
         *
         * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
         * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
         *
         * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
         * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
         * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
         *
         * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
         * cannot easily shadow the SCA because of the ipte lock.
         */
}

int kvm_arch_init(void *opaque)
{
        kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
        if (!kvm_s390_dbf)
                return -ENOMEM;

        if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
                debug_unregister(kvm_s390_dbf);
                return -ENOMEM;
        }

        kvm_s390_cpu_feat_init();

        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
        debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_S390_INJECT_IRQ:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
        case KVM_CAP_S390_IRQ_STATE:
        case KVM_CAP_S390_USER_INSTR0:
        case KVM_CAP_S390_CMMA_MIGRATION:
        case KVM_CAP_S390_AIS:
        case KVM_CAP_S390_AIS_MIGRATION:
                r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_S390_BSCA_CPU_SLOTS;
                if (!kvm_s390_use_sca_entries())
                        r = KVM_MAX_VCPUS;
                else if (sclp.has_esca && sclp.has_64bscao)
                        r = KVM_S390_ESCA_CPU_SLOTS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = MACHINE_HAS_VX;
                break;
        case KVM_CAP_S390_RI:
                r = test_facility(64);
                break;
        case KVM_CAP_S390_GS:
                r = test_facility(133);
                break;
        default:
                r = 0;
        }
        return r;
}

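/*
 * Propagate the dirty state of all guest pages in @memslot from the
 * host page tables (gmap) to the KVM dirty bitmap.
 */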
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (test_and_clear_guest_dirty(gmap->mm, address))
                        mark_page_dirty(kvm, cur_gfn);
                if (fatal_signal_pending(current))
                        return;
                cond_resched();
        }
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        if (kvm_is_ucontrol(kvm))
                return -EINVAL;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        slots = kvm_memslots(kvm);
        memslot = id_to_memslot(slots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

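/* Make every vcpu re-evaluate interception of the operation exception. */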
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
        }
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 129);
                        set_kvm_facility(kvm->arch.model.fac_list, 129);
                        if (test_facility(134)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 134);
                                set_kvm_facility(kvm->arch.model.fac_list, 134);
                        }
                        if (test_facility(135)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 135);
                                set_kvm_facility(kvm->arch.model.fac_list, 135);
                        }
                        r = 0;
                } else
                        r = -EINVAL;
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_RI:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (test_facility(64)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 64);
                        set_kvm_facility(kvm->arch.model.fac_list, 64);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_AIS:
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else {
                        set_kvm_facility(kvm->arch.model.fac_mask, 72);
                        set_kvm_facility(kvm->arch.model.fac_list, 72);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: AIS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_GS:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (test_facility(133)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 133);
                        set_kvm_facility(kvm->arch.model.fac_list, 133);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_USER_STSI:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_INSTR0:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
                kvm->arch.user_instr0 = 1;
                icpt_operexc_on_all_vcpus(kvm);
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
                         kvm->arch.mem_limit);
                if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;
        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -ENXIO;
                if (!sclp.has_cmma)
                        break;

                ret = -EBUSY;
                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
                if (!kvm->created_vcpus) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                ret = -ENXIO;
                if (!sclp.has_cmma)
                        break;
                ret = -EINVAL;
                if (!kvm->arch.use_cmma)
                        break;

                VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
                    new_limit > kvm->arch.mem_limit)
                        return -E2BIG;

                if (!new_limit)
                        return -EINVAL;

                /* gmap_create takes last usable address */
                if (new_limit != KVM_S390_NO_MEM_LIMIT)
                        new_limit -= 1;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (!kvm->created_vcpus) {
                        /* gmap_create will round the limit up */
                        struct gmap *new = gmap_create(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_remove(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
                VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
                         (void *) kvm->arch.gmap->asce);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

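/* Post @req to every vcpu of the VM and kick them out of SIE. */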
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
        int cx;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(cx, vcpu, kvm)
                kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
        struct kvm_s390_migration_state *mgs;
        struct kvm_memory_slot *ms;
        /* should be the only one */
        struct kvm_memslots *slots;
        unsigned long ram_pages;
        int slotnr;

        /* migration mode already enabled */
        if (kvm->arch.migration_state)
                return 0;

        slots = kvm_memslots(kvm);
        if (!slots || !slots->used_slots)
                return -EINVAL;

        mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
        if (!mgs)
                return -ENOMEM;
        kvm->arch.migration_state = mgs;

        if (kvm->arch.use_cmma) {
                /*
                 * Get the last slot. They should be sorted by base_gfn, so the
                 * last slot is also the one at the end of the address space.
                 * We have verified above that at least one slot is present.
                 */
                ms = slots->memslots + slots->used_slots - 1;
                /* round up so we only use full longs */
                ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
                /* allocate enough bytes to store all the bits */
                mgs->pgste_bitmap = vmalloc(ram_pages / 8);
                if (!mgs->pgste_bitmap) {
                        kfree(mgs);
                        kvm->arch.migration_state = NULL;
                        return -ENOMEM;
                }

                mgs->bitmap_size = ram_pages;
                atomic64_set(&mgs->dirty_pages, ram_pages);
                /* mark all the pages in active slots as dirty */
                for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
                        ms = slots->memslots + slotnr;
                        bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
                }

                kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
        }
        return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
        struct kvm_s390_migration_state *mgs;

        /* migration mode already disabled */
        if (!kvm->arch.migration_state)
                return 0;
        mgs = kvm->arch.migration_state;
        kvm->arch.migration_state = NULL;

        if (kvm->arch.use_cmma) {
                kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
                vfree(mgs->pgste_bitmap);
        }
        kfree(mgs);
        return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
{
        int idx, res = -ENXIO;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_MIGRATION_START:
                idx = srcu_read_lock(&kvm->srcu);
                res = kvm_s390_vm_start_migration(kvm);
                srcu_read_unlock(&kvm->srcu, idx);
                break;
        case KVM_S390_VM_MIGRATION_STOP:
                res = kvm_s390_vm_stop_migration(kvm);
                break;
        default:
                break;
        }
        mutex_unlock(&kvm->lock);

        return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
{
        u64 mig = (kvm->arch.migration_state != NULL);

        if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
                return -ENXIO;

        if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
                return -EFAULT;
        return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_tod_clock gtod;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        if (test_kvm_facility(kvm, 139))
                kvm_s390_set_tod_clock_ext(kvm, &gtod);
        else if (gtod.epoch_idx == 0)
                kvm_s390_set_tod_clock(kvm, gtod.tod);
        else
                return -EINVAL;

        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
                 gtod.epoch_idx, gtod.tod);

        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        kvm_s390_set_tod_clock(kvm, gtod);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_EXT:
                ret = kvm_s390_set_tod_ext(kvm, attr);
                break;
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

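/*
 * Read the host TOD clock including the epoch index and convert it to the
 * guest's view by adding the per-VM epoch; carry into the epoch index if
 * the 64-bit TOD addition wrapped around.
 */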
Collin L. Walling8fa16962016-07-26 15:29:44 -0400952static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
953 struct kvm_s390_vm_tod_clock *gtod)
954{
955 struct kvm_s390_tod_clock_ext htod;
956
957 preempt_disable();
958
959 get_tod_clock_ext((char *)&htod);
960
961 gtod->tod = htod.tod + kvm->arch.epoch;
962 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
963
964 if (gtod->tod < htod.tod)
965 gtod->epoch_idx += 1;
966
967 preempt_enable();
968}
969
970static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
971{
972 struct kvm_s390_vm_tod_clock gtod;
973
974 memset(&gtod, 0, sizeof(gtod));
975
976 if (test_kvm_facility(kvm, 139))
977 kvm_s390_get_tod_clock_ext(kvm, &gtod);
978 else
979 gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
980
981 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
982 return -EFAULT;
983
984 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
985 gtod.epoch_idx, gtod.tod);
986 return 0;
987}
988
Jason J. Herne72f25022014-11-25 09:46:02 -0500989static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
990{
991 u8 gtod_high = 0;
992
993 if (copy_to_user((void __user *)attr->addr, &gtod_high,
994 sizeof(gtod_high)))
995 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200996 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500997
998 return 0;
999}
1000
1001static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1002{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001003 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001004
David Hildenbrand60417fc2015-09-29 16:20:36 +02001005 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001006 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1007 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001008 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001009
1010 return 0;
1011}
1012
1013static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1014{
1015 int ret;
1016
1017 if (attr->flags)
1018 return -EINVAL;
1019
1020 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001021 case KVM_S390_VM_TOD_EXT:
1022 ret = kvm_s390_get_tod_ext(kvm, attr);
1023 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001024 case KVM_S390_VM_TOD_HIGH:
1025 ret = kvm_s390_get_tod_high(kvm, attr);
1026 break;
1027 case KVM_S390_VM_TOD_LOW:
1028 ret = kvm_s390_get_tod_low(kvm, attr);
1029 break;
1030 default:
1031 ret = -ENXIO;
1032 break;
1033 }
1034 return ret;
1035}
1036
Michael Mueller658b6ed2015-02-02 15:49:35 +01001037static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1038{
1039 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001040 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001041 int ret = 0;
1042
1043 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001044 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001045 ret = -EBUSY;
1046 goto out;
1047 }
1048 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1049 if (!proc) {
1050 ret = -ENOMEM;
1051 goto out;
1052 }
1053 if (!copy_from_user(proc, (void __user *)attr->addr,
1054 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001055 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001056 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1057 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001058 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001059 if (proc->ibc > unblocked_ibc)
1060 kvm->arch.model.ibc = unblocked_ibc;
1061 else if (proc->ibc < lowest_ibc)
1062 kvm->arch.model.ibc = lowest_ibc;
1063 else
1064 kvm->arch.model.ibc = proc->ibc;
1065 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001066 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001067 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001068 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1069 kvm->arch.model.ibc,
1070 kvm->arch.model.cpuid);
1071 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1072 kvm->arch.model.fac_list[0],
1073 kvm->arch.model.fac_list[1],
1074 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001075 } else
1076 ret = -EFAULT;
1077 kfree(proc);
1078out:
1079 mutex_unlock(&kvm->lock);
1080 return ret;
1081}
1082
David Hildenbrand15c97052015-03-19 17:36:43 +01001083static int kvm_s390_set_processor_feat(struct kvm *kvm,
1084 struct kvm_device_attr *attr)
1085{
1086 struct kvm_s390_vm_cpu_feat data;
1087 int ret = -EBUSY;
1088
1089 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1090 return -EFAULT;
1091 if (!bitmap_subset((unsigned long *) data.feat,
1092 kvm_s390_available_cpu_feat,
1093 KVM_S390_VM_CPU_FEAT_NR_BITS))
1094 return -EINVAL;
1095
1096 mutex_lock(&kvm->lock);
1097 if (!atomic_read(&kvm->online_vcpus)) {
1098 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1099 KVM_S390_VM_CPU_FEAT_NR_BITS);
1100 ret = 0;
1101 }
1102 mutex_unlock(&kvm->lock);
1103 return ret;
1104}
1105
David Hildenbrand0a763c72016-05-18 16:03:47 +02001106static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1107 struct kvm_device_attr *attr)
1108{
1109 /*
1110 * Once supported by kernel + hw, we have to store the subfunctions
1111 * in kvm->arch and remember that user space configured them.
1112 */
1113 return -ENXIO;
1114}
1115
Michael Mueller658b6ed2015-02-02 15:49:35 +01001116static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1117{
1118 int ret = -ENXIO;
1119
1120 switch (attr->attr) {
1121 case KVM_S390_VM_CPU_PROCESSOR:
1122 ret = kvm_s390_set_processor(kvm, attr);
1123 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001124 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1125 ret = kvm_s390_set_processor_feat(kvm, attr);
1126 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001127 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1128 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1129 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001130 }
1131 return ret;
1132}
1133
1134static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1135{
1136 struct kvm_s390_vm_cpu_processor *proc;
1137 int ret = 0;
1138
1139 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1140 if (!proc) {
1141 ret = -ENOMEM;
1142 goto out;
1143 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001144 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001145 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001146 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1147 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001148 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1149 kvm->arch.model.ibc,
1150 kvm->arch.model.cpuid);
1151 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1152 kvm->arch.model.fac_list[0],
1153 kvm->arch.model.fac_list[1],
1154 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001155 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1156 ret = -EFAULT;
1157 kfree(proc);
1158out:
1159 return ret;
1160}
1161
1162static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1163{
1164 struct kvm_s390_vm_cpu_machine *mach;
1165 int ret = 0;
1166
1167 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1168 if (!mach) {
1169 ret = -ENOMEM;
1170 goto out;
1171 }
1172 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001173 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001174 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001175 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001176 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001177 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001178 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1179 kvm->arch.model.ibc,
1180 kvm->arch.model.cpuid);
1181 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1182 mach->fac_mask[0],
1183 mach->fac_mask[1],
1184 mach->fac_mask[2]);
1185 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1186 mach->fac_list[0],
1187 mach->fac_list[1],
1188 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001189 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1190 ret = -EFAULT;
1191 kfree(mach);
1192out:
1193 return ret;
1194}
1195
David Hildenbrand15c97052015-03-19 17:36:43 +01001196static int kvm_s390_get_processor_feat(struct kvm *kvm,
1197 struct kvm_device_attr *attr)
1198{
1199 struct kvm_s390_vm_cpu_feat data;
1200
1201 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1202 KVM_S390_VM_CPU_FEAT_NR_BITS);
1203 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1204 return -EFAULT;
1205 return 0;
1206}
1207
1208static int kvm_s390_get_machine_feat(struct kvm *kvm,
1209 struct kvm_device_attr *attr)
1210{
1211 struct kvm_s390_vm_cpu_feat data;
1212
1213 bitmap_copy((unsigned long *) data.feat,
1214 kvm_s390_available_cpu_feat,
1215 KVM_S390_VM_CPU_FEAT_NR_BITS);
1216 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1217 return -EFAULT;
1218 return 0;
1219}
1220
David Hildenbrand0a763c72016-05-18 16:03:47 +02001221static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1222 struct kvm_device_attr *attr)
1223{
1224 /*
1225 * Once we can actually configure subfunctions (kernel + hw support),
1226 * we have to check if they were already set by user space, if so copy
1227 * them from kvm->arch.
1228 */
1229 return -ENXIO;
1230}
1231
1232static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1233 struct kvm_device_attr *attr)
1234{
1235 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1236 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1237 return -EFAULT;
1238 return 0;
1239}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001240static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1241{
1242 int ret = -ENXIO;
1243
1244 switch (attr->attr) {
1245 case KVM_S390_VM_CPU_PROCESSOR:
1246 ret = kvm_s390_get_processor(kvm, attr);
1247 break;
1248 case KVM_S390_VM_CPU_MACHINE:
1249 ret = kvm_s390_get_machine(kvm, attr);
1250 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001251 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1252 ret = kvm_s390_get_processor_feat(kvm, attr);
1253 break;
1254 case KVM_S390_VM_CPU_MACHINE_FEAT:
1255 ret = kvm_s390_get_machine_feat(kvm, attr);
1256 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001257 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1258 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1259 break;
1260 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1261 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1262 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001263 }
1264 return ret;
1265}
1266
Dominik Dingelf2061652014-04-09 13:13:00 +02001267static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1268{
1269 int ret;
1270
1271 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001272 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001273 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001274 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001275 case KVM_S390_VM_TOD:
1276 ret = kvm_s390_set_tod(kvm, attr);
1277 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001278 case KVM_S390_VM_CPU_MODEL:
1279 ret = kvm_s390_set_cpu_model(kvm, attr);
1280 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001281 case KVM_S390_VM_CRYPTO:
1282 ret = kvm_s390_vm_set_crypto(kvm, attr);
1283 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001284 case KVM_S390_VM_MIGRATION:
1285 ret = kvm_s390_vm_set_migration(kvm, attr);
1286 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001287 default:
1288 ret = -ENXIO;
1289 break;
1290 }
1291
1292 return ret;
1293}
1294
1295static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1296{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001297 int ret;
1298
1299 switch (attr->group) {
1300 case KVM_S390_VM_MEM_CTRL:
1301 ret = kvm_s390_get_mem_control(kvm, attr);
1302 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001303 case KVM_S390_VM_TOD:
1304 ret = kvm_s390_get_tod(kvm, attr);
1305 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001306 case KVM_S390_VM_CPU_MODEL:
1307 ret = kvm_s390_get_cpu_model(kvm, attr);
1308 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001309 case KVM_S390_VM_MIGRATION:
1310 ret = kvm_s390_vm_get_migration(kvm, attr);
1311 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001312 default:
1313 ret = -ENXIO;
1314 break;
1315 }
1316
1317 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001318}
1319
1320static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1321{
1322 int ret;
1323
1324 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001325 case KVM_S390_VM_MEM_CTRL:
1326 switch (attr->attr) {
1327 case KVM_S390_VM_MEM_ENABLE_CMMA:
1328 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001329 ret = sclp.has_cmma ? 0 : -ENXIO;
1330 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001331 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001332 ret = 0;
1333 break;
1334 default:
1335 ret = -ENXIO;
1336 break;
1337 }
1338 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001339 case KVM_S390_VM_TOD:
1340 switch (attr->attr) {
1341 case KVM_S390_VM_TOD_LOW:
1342 case KVM_S390_VM_TOD_HIGH:
1343 ret = 0;
1344 break;
1345 default:
1346 ret = -ENXIO;
1347 break;
1348 }
1349 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001350 case KVM_S390_VM_CPU_MODEL:
1351 switch (attr->attr) {
1352 case KVM_S390_VM_CPU_PROCESSOR:
1353 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001354 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1355 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001356 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001357 ret = 0;
1358 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001359 /* configuring subfunctions is not supported yet */
1360 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001361 default:
1362 ret = -ENXIO;
1363 break;
1364 }
1365 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001366 case KVM_S390_VM_CRYPTO:
1367 switch (attr->attr) {
1368 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1369 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1370 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1371 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1372 ret = 0;
1373 break;
1374 default:
1375 ret = -ENXIO;
1376 break;
1377 }
1378 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001379 case KVM_S390_VM_MIGRATION:
1380 ret = 0;
1381 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001382 default:
1383 ret = -ENXIO;
1384 break;
1385 }
1386
1387 return ret;
1388}
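
/*
 * Illustrative sketch (not part of the kernel source): user space typically
 * probes an attribute with KVM_HAS_DEVICE_ATTR before reading or writing it,
 * e.g. for the CPU model; vm_fd and the prop buffer are assumptions of this
 * example, not taken from this file:
 *
 *	struct kvm_s390_vm_cpu_processor prop;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_S390_VM_CPU_MODEL,
 *		.attr	= KVM_S390_VM_CPU_PROCESSOR,
 *		.addr	= (__u64)(unsigned long)&prop,
 *	};
 *
 *	if (!ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */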
1389
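/*
 * Handler for KVM_S390_GET_SKEYS: read the storage keys of args->count guest
 * pages starting at args->start_gfn and copy them to the user buffer at
 * args->skeydata_addr. Returns KVM_S390_GET_SKEYS_NONE if the guest never
 * enabled storage keys, so there is nothing to save.
 */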
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001390static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1391{
1392 uint8_t *keys;
1393 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001394 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001395
1396 if (args->flags != 0)
1397 return -EINVAL;
1398
1399 /* Is this guest using storage keys? */
1400 if (!mm_use_skey(current->mm))
1401 return KVM_S390_GET_SKEYS_NONE;
1402
1403 /* Enforce sane limit on memory allocation */
1404 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1405 return -EINVAL;
1406
Michal Hocko752ade62017-05-08 15:57:27 -07001407 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001408 if (!keys)
1409 return -ENOMEM;
1410
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001411 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001412 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001413 for (i = 0; i < args->count; i++) {
1414 hva = gfn_to_hva(kvm, args->start_gfn + i);
1415 if (kvm_is_error_hva(hva)) {
1416 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001417 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001418 }
1419
David Hildenbrand154c8c12016-05-09 11:22:34 +02001420 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1421 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001422 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001423 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001424 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001425 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001426
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001427 if (!r) {
1428 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1429 sizeof(uint8_t) * args->count);
1430 if (r)
1431 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001432 }
1433
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001434 kvfree(keys);
1435 return r;
1436}
1437
1438static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1439{
1440 uint8_t *keys;
1441 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001442 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001443
1444 if (args->flags != 0)
1445 return -EINVAL;
1446
1447 /* Enforce sane limit on memory allocation */
1448 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1449 return -EINVAL;
1450
Michal Hocko752ade62017-05-08 15:57:27 -07001451 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001452 if (!keys)
1453 return -ENOMEM;
1454
1455 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1456 sizeof(uint8_t) * args->count);
1457 if (r) {
1458 r = -EFAULT;
1459 goto out;
1460 }
1461
1462 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001463 r = s390_enable_skey();
1464 if (r)
1465 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001466
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001467 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001468 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001469 for (i = 0; i < args->count; i++) {
1470 hva = gfn_to_hva(kvm, args->start_gfn + i);
1471 if (kvm_is_error_hva(hva)) {
1472 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001473 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001474 }
1475
1476 /* Lowest order bit is reserved */
1477 if (keys[i] & 0x01) {
1478 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001479 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001480 }
1481
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001482 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001483 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001484 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001485 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001486 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001487 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001488out:
1489 kvfree(keys);
1490 return r;
1491}
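
/*
 * Illustrative sketch (not part of the kernel source): roughly how user space
 * might save the keys of npages guest pages for migration; vm_fd, npages and
 * key_buf are assumptions of this example, and error handling is omitted:
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn	= 0,
 *		.count		= npages,
 *		.skeydata_addr	= (__u64)(unsigned long)key_buf,
 *	};
 *	long ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest never used
 * storage keys; restoring on the destination uses the same structure with
 * KVM_S390_SET_SKEYS.
 */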
1492
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001493/*
1494 * Base address and length must be sent at the start of each block, therefore
1495 * it's cheaper to send some clean data, as long as it's less than the size of
1496 * two longs.
1497 */
1498#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1499/* for consistency */
1500#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
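
/*
 * Example: with 8-byte pointers, KVM_S390_MAX_BIT_DISTANCE is 16, so a gap
 * of up to 16 clean pages (16 bytes of values) is filled in inline rather
 * than ending the current block, because starting a new block would cost the
 * same 16 bytes again for its base address and length.
 */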
1501
1502/*
1503 * This function searches for the next page with dirty CMMA attributes, and
1504 * saves the attributes in the buffer up to either the end of the buffer or
1505 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1506 * no trailing clean bytes are saved.
1507 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1508 * output buffer will indicate 0 as length.
1509 */
1510static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1511 struct kvm_s390_cmma_log *args)
1512{
1513 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1514 unsigned long bufsize, hva, pgstev, i, next, cur;
1515 int srcu_idx, peek, r = 0, rr;
1516 u8 *res;
1517
1518 cur = args->start_gfn;
1519 i = next = pgstev = 0;
1520
1521 if (unlikely(!kvm->arch.use_cmma))
1522 return -ENXIO;
1523 /* Invalid/unsupported flags were specified */
1524 if (args->flags & ~KVM_S390_CMMA_PEEK)
1525 return -EINVAL;
1526 /* Migration mode query, and we are not doing a migration */
1527 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1528 if (!peek && !s)
1529 return -EINVAL;
1530 /* CMMA is disabled or was not used, or the buffer has length zero */
1531 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1532 if (!bufsize || !kvm->mm->context.use_cmma) {
1533 memset(args, 0, sizeof(*args));
1534 return 0;
1535 }
1536
1537 if (!peek) {
1538 /* We are not peeking, and there are no dirty pages */
1539 if (!atomic64_read(&s->dirty_pages)) {
1540 memset(args, 0, sizeof(*args));
1541 return 0;
1542 }
1543 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1544 args->start_gfn);
1545 if (cur >= s->bitmap_size) /* nothing found, loop back */
1546 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1547 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1548 memset(args, 0, sizeof(*args));
1549 return 0;
1550 }
1551 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1552 }
1553
1554 res = vmalloc(bufsize);
1555 if (!res)
1556 return -ENOMEM;
1557
1558 args->start_gfn = cur;
1559
1560 down_read(&kvm->mm->mmap_sem);
1561 srcu_idx = srcu_read_lock(&kvm->srcu);
1562 while (i < bufsize) {
1563 hva = gfn_to_hva(kvm, cur);
1564 if (kvm_is_error_hva(hva)) {
1565 r = -EFAULT;
1566 break;
1567 }
1568 /* decrement only if we actually flipped the bit to 0 */
1569 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1570 atomic64_dec(&s->dirty_pages);
1571 r = get_pgste(kvm->mm, hva, &pgstev);
1572 if (r < 0)
1573 pgstev = 0;
		/* save the usage state and NODAT bits of this page's PGSTE */
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001575 res[i++] = (pgstev >> 24) & 0x43;
		/*
		 * If the next dirty bit is more than KVM_S390_MAX_BIT_DISTANCE
		 * pages away, stop here; otherwise, if we have caught up with
		 * the previously found "next" bit, look up the one after it.
		 */
1580 if (!peek) {
1581 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1582 break;
1583 if (cur == next)
1584 next = find_next_bit(s->pgste_bitmap,
1585 s->bitmap_size, cur + 1);
1586 /* reached the end of the bitmap or of the buffer, stop */
1587 if ((next >= s->bitmap_size) ||
1588 (next >= args->start_gfn + bufsize))
1589 break;
1590 }
1591 cur++;
1592 }
1593 srcu_read_unlock(&kvm->srcu, srcu_idx);
1594 up_read(&kvm->mm->mmap_sem);
1595 args->count = i;
1596 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1597
1598 rr = copy_to_user((void __user *)args->values, res, args->count);
1599 if (rr)
1600 r = -EFAULT;
1601
1602 vfree(res);
1603 return r;
1604}
1605
1606/*
1607 * This function sets the CMMA attributes for the given pages. If the input
1608 * buffer has zero length, no action is taken, otherwise the attributes are
1609 * set and the mm->context.use_cmma flag is set.
1610 */
1611static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1612 const struct kvm_s390_cmma_log *args)
1613{
1614 unsigned long hva, mask, pgstev, i;
1615 uint8_t *bits;
1616 int srcu_idx, r = 0;
1617
1618 mask = args->mask;
1619
1620 if (!kvm->arch.use_cmma)
1621 return -ENXIO;
1622 /* invalid/unsupported flags */
1623 if (args->flags != 0)
1624 return -EINVAL;
1625 /* Enforce sane limit on memory allocation */
1626 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1627 return -EINVAL;
1628 /* Nothing to do */
1629 if (args->count == 0)
1630 return 0;
1631
1632 bits = vmalloc(sizeof(*bits) * args->count);
1633 if (!bits)
1634 return -ENOMEM;
1635
1636 r = copy_from_user(bits, (void __user *)args->values, args->count);
1637 if (r) {
1638 r = -EFAULT;
1639 goto out;
1640 }
1641
1642 down_read(&kvm->mm->mmap_sem);
1643 srcu_idx = srcu_read_lock(&kvm->srcu);
1644 for (i = 0; i < args->count; i++) {
1645 hva = gfn_to_hva(kvm, args->start_gfn + i);
1646 if (kvm_is_error_hva(hva)) {
1647 r = -EFAULT;
1648 break;
1649 }
1650
1651 pgstev = bits[i];
1652 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001653 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001654 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1655 }
1656 srcu_read_unlock(&kvm->srcu, srcu_idx);
1657 up_read(&kvm->mm->mmap_sem);
1658
1659 if (!kvm->mm->context.use_cmma) {
1660 down_write(&kvm->mm->mmap_sem);
1661 kvm->mm->context.use_cmma = 1;
1662 up_write(&kvm->mm->mmap_sem);
1663 }
1664out:
1665 vfree(bits);
1666 return r;
1667}
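
/*
 * Illustrative sketch (not part of the kernel source): during migration, user
 * space repeatedly pulls dirty CMMA values with KVM_S390_GET_CMMA_BITS and
 * replays them on the destination with KVM_S390_SET_CMMA_BITS. Each byte in
 * the values buffer holds (PGSTE >> 24) & 0x43 of one page, i.e. its guest
 * usage state plus the NODAT indication. vm_fd, buf and buf_size are
 * assumptions of this example, and error handling is omitted:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn	= 0,
 *		.count		= buf_size,
 *		.values		= (__u64)(unsigned long)buf,
 *	};
 *
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		... transfer log.count bytes for the pages starting at
 *		    log.start_gfn, then continue behind them ...
 *		log.start_gfn += log.count;
 *		log.count = buf_size;
 *	} while (log.remaining);
 */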
1668
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001669long kvm_arch_vm_ioctl(struct file *filp,
1670 unsigned int ioctl, unsigned long arg)
1671{
1672 struct kvm *kvm = filp->private_data;
1673 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001674 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001675 int r;
1676
1677 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001678 case KVM_S390_INTERRUPT: {
1679 struct kvm_s390_interrupt s390int;
1680
1681 r = -EFAULT;
1682 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1683 break;
1684 r = kvm_s390_inject_vm(kvm, &s390int);
1685 break;
1686 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001687 case KVM_ENABLE_CAP: {
1688 struct kvm_enable_cap cap;
1689 r = -EFAULT;
1690 if (copy_from_user(&cap, argp, sizeof(cap)))
1691 break;
1692 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1693 break;
1694 }
Cornelia Huck84223592013-07-15 13:36:01 +02001695 case KVM_CREATE_IRQCHIP: {
1696 struct kvm_irq_routing_entry routing;
1697
1698 r = -EINVAL;
1699 if (kvm->arch.use_irqchip) {
1700 /* Set up dummy routing. */
1701 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001702 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001703 }
1704 break;
1705 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001706 case KVM_SET_DEVICE_ATTR: {
1707 r = -EFAULT;
1708 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1709 break;
1710 r = kvm_s390_vm_set_attr(kvm, &attr);
1711 break;
1712 }
1713 case KVM_GET_DEVICE_ATTR: {
1714 r = -EFAULT;
1715 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1716 break;
1717 r = kvm_s390_vm_get_attr(kvm, &attr);
1718 break;
1719 }
1720 case KVM_HAS_DEVICE_ATTR: {
1721 r = -EFAULT;
1722 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1723 break;
1724 r = kvm_s390_vm_has_attr(kvm, &attr);
1725 break;
1726 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001727 case KVM_S390_GET_SKEYS: {
1728 struct kvm_s390_skeys args;
1729
1730 r = -EFAULT;
1731 if (copy_from_user(&args, argp,
1732 sizeof(struct kvm_s390_skeys)))
1733 break;
1734 r = kvm_s390_get_skeys(kvm, &args);
1735 break;
1736 }
1737 case KVM_S390_SET_SKEYS: {
1738 struct kvm_s390_skeys args;
1739
1740 r = -EFAULT;
1741 if (copy_from_user(&args, argp,
1742 sizeof(struct kvm_s390_skeys)))
1743 break;
1744 r = kvm_s390_set_skeys(kvm, &args);
1745 break;
1746 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001747 case KVM_S390_GET_CMMA_BITS: {
1748 struct kvm_s390_cmma_log args;
1749
1750 r = -EFAULT;
1751 if (copy_from_user(&args, argp, sizeof(args)))
1752 break;
1753 r = kvm_s390_get_cmma_bits(kvm, &args);
1754 if (!r) {
1755 r = copy_to_user(argp, &args, sizeof(args));
1756 if (r)
1757 r = -EFAULT;
1758 }
1759 break;
1760 }
1761 case KVM_S390_SET_CMMA_BITS: {
1762 struct kvm_s390_cmma_log args;
1763
1764 r = -EFAULT;
1765 if (copy_from_user(&args, argp, sizeof(args)))
1766 break;
1767 r = kvm_s390_set_cmma_bits(kvm, &args);
1768 break;
1769 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001770 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001771 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001772 }
1773
1774 return r;
1775}
1776
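/*
 * Query the AP configuration: PQAP with the QCI function code (0x04000000)
 * in register 0 stores 128 bytes of crypto configuration information at the
 * address in register 2; the condition code is extracted via ipm/srl. The
 * EX_TABLE fixup keeps the query from crashing on machines where PQAP(QCI)
 * is not available.
 */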
Tony Krowiak45c9b472015-01-13 11:33:26 -05001777static int kvm_s390_query_ap_config(u8 *config)
1778{
1779 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001780 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001781
Christian Borntraeger86044c82015-02-26 13:53:47 +01001782 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05001783 asm volatile(
1784 "lgr 0,%1\n"
1785 "lgr 2,%2\n"
1786 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001787 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001788 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001789 "1:\n"
1790 EX_TABLE(0b, 1b)
1791 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001792 : "r" (fcn_code), "r" (config)
1793 : "cc", "0", "2", "memory"
1794 );
1795
1796 return cc;
1797}
1798
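/*
 * APXA (AP extended addressing) is indicated by a bit in the QCI information
 * block returned above; its presence decides which CRYCB format is used
 * below.
 */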
1799static int kvm_s390_apxa_installed(void)
1800{
1801 u8 config[128];
1802 int cc;
1803
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001804 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001805 cc = kvm_s390_query_ap_config(config);
1806
1807 if (cc)
1808 pr_err("PQAP(QCI) failed with cc=%d", cc);
1809 else
1810 return config[0] & 0x40;
1811 }
1812
1813 return 0;
1814}
1815
1816static void kvm_s390_set_crycb_format(struct kvm *kvm)
1817{
1818 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1819
1820 if (kvm_s390_apxa_installed())
1821 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1822 else
1823 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1824}
1825
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001826static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001827{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001828 struct cpuid cpuid;
1829
1830 get_cpu_id(&cpuid);
1831 cpuid.version = 0xff;
1832 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001833}
1834
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001835static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04001836{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001837 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001838 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001839
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001840 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001841 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001842
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001843 /* Enable AES/DEA protected key functions by default */
1844 kvm->arch.crypto.aes_kw = 1;
1845 kvm->arch.crypto.dea_kw = 1;
1846 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1847 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1848 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1849 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04001850}
1851
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001852static void sca_dispose(struct kvm *kvm)
1853{
1854 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001855 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001856 else
1857 free_page((unsigned long)(kvm->arch.sca));
1858 kvm->arch.sca = NULL;
1859}
1860
Carsten Ottee08b9632012-01-04 10:25:20 +01001861int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001862{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001863 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001864 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001865 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001866 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001867
Carsten Ottee08b9632012-01-04 10:25:20 +01001868 rc = -EINVAL;
1869#ifdef CONFIG_KVM_S390_UCONTROL
1870 if (type & ~KVM_VM_S390_UCONTROL)
1871 goto out_err;
1872 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1873 goto out_err;
1874#else
1875 if (type)
1876 goto out_err;
1877#endif
1878
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001879 rc = s390_enable_sie();
1880 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001881 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001882
Carsten Otteb2904112011-10-18 12:27:13 +02001883 rc = -ENOMEM;
1884
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001885 kvm->arch.use_esca = 0; /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001886 if (!sclp.has_64bscao)
1887 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001888 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001889 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001890 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001891 goto out_err;
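	/*
	 * Place the basic SCA of each new VM at a different 16 byte offset
	 * within its page (serialized by kvm_lock), presumably so that the
	 * SCAs of different VMs do not all share the same cache lines.
	 */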
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001892 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001893 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001894 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001895 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001896 kvm->arch.sca = (struct bsca_block *)
1897 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001898 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001899
1900 sprintf(debug_name, "kvm-%u", current->pid);
1901
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02001902 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001903 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001904 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001905
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001906 kvm->arch.sie_page2 =
1907 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1908 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001909 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001910
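	/*
	 * fac_mask describes the facilities KVM can offer on this host (the
	 * host facility list filtered by kvm_s390_fac_list_mask), while
	 * fac_list is what the guest actually sees; it starts out as a copy
	 * of the mask and may be narrowed later via the CPU model attributes.
	 */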
Michael Muellerfb5bf932015-02-27 14:25:10 +01001911 /* Populate the facility mask initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001912 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001913 sizeof(S390_lowcore.stfle_fac_list));
Michael Mueller9d8d5782015-02-02 15:42:51 +01001914 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1915 if (i < kvm_s390_fac_list_mask_size())
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001916 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001917 else
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001918 kvm->arch.model.fac_mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001919 }
1920
Michael Mueller981467c2015-02-24 13:51:04 +01001921 /* Populate the facility list initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001922 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1923 memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001924 S390_ARCH_FAC_LIST_SIZE_BYTE);
1925
David Hildenbrand19352222017-08-29 16:31:08 +02001926 /* we are always in czam mode - even on pre z14 machines */
1927 set_kvm_facility(kvm->arch.model.fac_mask, 138);
1928 set_kvm_facility(kvm->arch.model.fac_list, 138);
1929 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001930 set_kvm_facility(kvm->arch.model.fac_mask, 74);
1931 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001932 if (MACHINE_HAS_TLB_GUEST) {
1933 set_kvm_facility(kvm->arch.model.fac_mask, 147);
1934 set_kvm_facility(kvm->arch.model.fac_list, 147);
1935 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001936
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001937 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001938 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001939
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001940 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001941
Fei Li51978392017-02-17 17:06:26 +08001942 mutex_init(&kvm->arch.float_int.ais_lock);
1943 kvm->arch.float_int.simm = 0;
1944 kvm->arch.float_int.nimm = 0;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001945 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001946 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1947 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001948 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001949 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001950
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001951 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001952 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001953
Carsten Ottee08b9632012-01-04 10:25:20 +01001954 if (type & KVM_VM_S390_UCONTROL) {
1955 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01001956 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01001957 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001958 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001959 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001960 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001961 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001962 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001963 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001964 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001965 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001966 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001967 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001968 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001969
1970 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001971 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001972 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001973
David Hildenbrand8ad35752014-03-14 11:00:21 +01001974 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001975 kvm_s390_vsie_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001976 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001977
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001978 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001979out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001980 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01001981 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001982 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001983 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001984 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001985}
1986
Luiz Capitulino235539b2016-09-07 14:47:23 -04001987bool kvm_arch_has_vcpu_debugfs(void)
1988{
1989 return false;
1990}
1991
1992int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
1993{
1994 return 0;
1995}
1996
Christian Borntraegerd329c032008-11-26 14:50:27 +01001997void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1998{
1999 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002000 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002001 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002002 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002003 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002004 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002005
2006 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002007 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002008
Dominik Dingele6db1d62015-05-07 15:41:57 +02002009 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002010 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002011 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002012
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002013 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002014 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002015}
2016
2017static void kvm_free_vcpus(struct kvm *kvm)
2018{
2019 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002020 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002021
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002022 kvm_for_each_vcpu(i, vcpu, kvm)
2023 kvm_arch_vcpu_destroy(vcpu);
2024
2025 mutex_lock(&kvm->lock);
2026 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2027 kvm->vcpus[i] = NULL;
2028
2029 atomic_set(&kvm->online_vcpus, 0);
2030 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002031}
2032
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002033void kvm_arch_destroy_vm(struct kvm *kvm)
2034{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002035 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002036 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002037 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002038 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002039 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002040 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002041 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002042 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002043 kvm_s390_vsie_destroy(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002044 if (kvm->arch.migration_state) {
2045 vfree(kvm->arch.migration_state->pgste_bitmap);
2046 kfree(kvm->arch.migration_state);
2047 }
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002048 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002049}
2050
2051/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002052static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2053{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002054 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002055 if (!vcpu->arch.gmap)
2056 return -ENOMEM;
2057 vcpu->arch.gmap->private = vcpu->kvm;
2058
2059 return 0;
2060}
2061
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002062static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2063{
David Hildenbranda6940672016-08-08 22:39:32 +02002064 if (!kvm_s390_use_sca_entries())
2065 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002066 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002067 if (vcpu->kvm->arch.use_esca) {
2068 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002069
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002070 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002071 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002072 } else {
2073 struct bsca_block *sca = vcpu->kvm->arch.sca;
2074
2075 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002076 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002077 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002078 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002079}
2080
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002081static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002082{
David Hildenbranda6940672016-08-08 22:39:32 +02002083 if (!kvm_s390_use_sca_entries()) {
2084 struct bsca_block *sca = vcpu->kvm->arch.sca;
2085
2086 /* we still need the basic sca for the ipte control */
2087 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2088 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2089 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002090 read_lock(&vcpu->kvm->arch.sca_lock);
2091 if (vcpu->kvm->arch.use_esca) {
2092 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002093
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002094 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002095 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2096 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002097 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002098 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002099 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002100 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002101
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002102 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002103 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2104 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002105 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002106 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002107 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002108}
2109
2110/* Basic SCA to Extended SCA data copy routines */
2111static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2112{
2113 d->sda = s->sda;
2114 d->sigp_ctrl.c = s->sigp_ctrl.c;
2115 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2116}
2117
2118static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2119{
2120 int i;
2121
2122 d->ipte_control = s->ipte_control;
2123 d->mcn[0] = s->mcn;
2124 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2125 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2126}
2127
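/*
 * Convert the VM from the basic SCA to the extended SCA, which has room for
 * more VCPUs: allocate the new block, block all VCPUs while holding the
 * sca_lock for writing, copy the existing entries, repoint every VCPU's SCA
 * origin and set ECB2_ESCA, then free the old basic SCA.
 */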
2128static int sca_switch_to_extended(struct kvm *kvm)
2129{
2130 struct bsca_block *old_sca = kvm->arch.sca;
2131 struct esca_block *new_sca;
2132 struct kvm_vcpu *vcpu;
2133 unsigned int vcpu_idx;
2134 u32 scaol, scaoh;
2135
2136 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2137 if (!new_sca)
2138 return -ENOMEM;
2139
2140 scaoh = (u32)((u64)(new_sca) >> 32);
2141 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2142
2143 kvm_s390_vcpu_block_all(kvm);
2144 write_lock(&kvm->arch.sca_lock);
2145
2146 sca_copy_b_to_e(new_sca, old_sca);
2147
2148 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2149 vcpu->arch.sie_block->scaoh = scaoh;
2150 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002151 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002152 }
2153 kvm->arch.sca = new_sca;
2154 kvm->arch.use_esca = 1;
2155
2156 write_unlock(&kvm->arch.sca_lock);
2157 kvm_s390_vcpu_unblock_all(kvm);
2158
2159 free_page((unsigned long)old_sca);
2160
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002161 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2162 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002163 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002164}
2165
2166static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2167{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002168 int rc;
2169
David Hildenbranda6940672016-08-08 22:39:32 +02002170 if (!kvm_s390_use_sca_entries()) {
2171 if (id < KVM_MAX_VCPUS)
2172 return true;
2173 return false;
2174 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002175 if (id < KVM_S390_BSCA_CPU_SLOTS)
2176 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002177 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002178 return false;
2179
2180 mutex_lock(&kvm->lock);
2181 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2182 mutex_unlock(&kvm->lock);
2183
2184 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002185}
2186
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002187int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2188{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002189 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2190 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002191 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2192 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002193 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002194 KVM_SYNC_CRS |
2195 KVM_SYNC_ARCH0 |
2196 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002197 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002198 if (test_kvm_facility(vcpu->kvm, 64))
2199 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002200 if (test_kvm_facility(vcpu->kvm, 133))
2201 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002202 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2203 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2204 */
2205 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002206 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002207 else
2208 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002209
2210 if (kvm_is_ucontrol(vcpu->kvm))
2211 return __kvm_ucontrol_vcpu_init(vcpu);
2212
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002213 return 0;
2214}
2215
David Hildenbranddb0758b2016-02-15 09:42:25 +01002216/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2217static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2218{
2219 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002220 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002221 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002222 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002223}
2224
2225/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2226static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2227{
2228 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002229 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002230 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2231 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002232 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002233}
2234
2235/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2236static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2237{
2238 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2239 vcpu->arch.cputm_enabled = true;
2240 __start_cpu_timer_accounting(vcpu);
2241}
2242
2243/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2244static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2245{
2246 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2247 __stop_cpu_timer_accounting(vcpu);
2248 vcpu->arch.cputm_enabled = false;
2249}
2250
2251static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2252{
2253 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2254 __enable_cpu_timer_accounting(vcpu);
2255 preempt_enable();
2256}
2257
2258static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2259{
2260 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2261 __disable_cpu_timer_accounting(vcpu);
2262 preempt_enable();
2263}
2264
David Hildenbrand4287f242016-02-15 09:40:12 +01002265/* set the cpu timer - may only be called from the VCPU thread itself */
2266void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2267{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002268 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002269 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002270 if (vcpu->arch.cputm_enabled)
2271 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002272 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002273 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002274 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002275}
2276
David Hildenbranddb0758b2016-02-15 09:42:25 +01002277/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002278__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2279{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002280 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002281 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002282
2283 if (unlikely(!vcpu->arch.cputm_enabled))
2284 return vcpu->arch.sie_block->cputm;
2285
David Hildenbrand9c23a132016-02-17 21:53:33 +01002286 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2287 do {
2288 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2289 /*
2290 * If the writer would ever execute a read in the critical
2291 * section, e.g. in irq context, we have a deadlock.
2292 */
2293 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2294 value = vcpu->arch.sie_block->cputm;
2295 /* if cputm_start is 0, accounting is being started/stopped */
2296 if (likely(vcpu->arch.cputm_start))
2297 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2298 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2299 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002300 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002301}
2302
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002303void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2304{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002305
David Hildenbrand37d9df92015-03-11 16:47:33 +01002306 gmap_enable(vcpu->arch.enabled_gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002307 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002308 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002309 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002310 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002311}
2312
2313void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2314{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002315 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002316 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002317 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002318 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002319 vcpu->arch.enabled_gmap = gmap_get_enabled();
2320 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002321
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002322}
2323
2324static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2325{
2326 /* this equals initial cpu reset in pop, but we don't switch to ESA */
2327 vcpu->arch.sie_block->gpsw.mask = 0UL;
2328 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002329 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002330 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002331 vcpu->arch.sie_block->ckc = 0UL;
2332 vcpu->arch.sie_block->todpr = 0;
2333 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2334 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2335 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002336 /* make sure the new fpc will be lazily loaded */
2337 save_fpu_regs();
2338 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002339 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002340 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002341 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2342 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002343 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2344 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002345 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002346}
2347
Dominik Dingel31928aa2014-12-04 15:47:07 +01002348void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002349{
Jason J. Herne72f25022014-11-25 09:46:02 -05002350 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002351 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002352 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02002353 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002354 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002355 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002356 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002357 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002358 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002359 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2360 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002361 /* make vcpu_load load the right gmap on the first trigger */
2362 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002363}
2364
Tony Krowiak5102ee82014-06-27 14:46:01 -04002365static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2366{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002367 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002368 return;
2369
Tony Krowiaka374e892014-09-03 10:13:53 +02002370 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2371
2372 if (vcpu->kvm->arch.crypto.aes_kw)
2373 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2374 if (vcpu->kvm->arch.crypto.dea_kw)
2375 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2376
Tony Krowiak5102ee82014-06-27 14:46:01 -04002377 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2378}
2379
Dominik Dingelb31605c2014-03-25 13:47:11 +01002380void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2381{
2382 free_page(vcpu->arch.sie_block->cbrlo);
2383 vcpu->arch.sie_block->cbrlo = 0;
2384}
2385
2386int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2387{
2388 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2389 if (!vcpu->arch.sie_block->cbrlo)
2390 return -ENOMEM;
2391
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002392 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002393 return 0;
2394}
2395
Michael Mueller91520f12015-02-27 14:32:11 +01002396static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2397{
2398 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2399
Michael Mueller91520f12015-02-27 14:32:11 +01002400 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002401 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002402 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002403}
2404
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002405int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2406{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002407 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002408
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002409 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2410 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002411 CPUSTAT_STOPPED);
2412
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002413 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002414 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002415 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002416 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002417
Michael Mueller91520f12015-02-27 14:32:11 +01002418 kvm_s390_vcpu_setup_model(vcpu);
2419
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002420 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2421 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002422 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002423 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002424 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002425 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002426 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002427
David Hildenbrand873b4252016-04-04 15:53:47 +02002428 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002429 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002430 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002431 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2432 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002433 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002434 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002435 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002436 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002437 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002438 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002439 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002440 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002441 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002442 vcpu->arch.sie_block->eca |= ECA_VX;
2443 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002444 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002445 if (test_kvm_facility(vcpu->kvm, 139))
2446 vcpu->arch.sie_block->ecd |= ECD_MEF;
2447
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002448 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2449 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002450 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002451
2452 if (sclp.has_kss)
2453 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2454 else
2455 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002456
Dominik Dingele6db1d62015-05-07 15:41:57 +02002457 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002458 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2459 if (rc)
2460 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002461 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002462 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002463 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002464
Tony Krowiak5102ee82014-06-27 14:46:01 -04002465 kvm_s390_vcpu_crypto_setup(vcpu);
2466
Dominik Dingelb31605c2014-03-25 13:47:11 +01002467 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002468}
2469
2470struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2471 unsigned int id)
2472{
Carsten Otte4d475552011-10-18 12:27:12 +02002473 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002474 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002475 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002476
David Hildenbrand42158252015-10-12 12:57:22 +02002477 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002478 goto out;
2479
2480 rc = -ENOMEM;
2481
Michael Muellerb110fea2013-06-12 13:54:54 +02002482 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002483 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002484 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002485
QingFeng Haoda72ca42017-06-07 11:41:19 +02002486 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002487 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2488 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002489 goto out_free_cpu;
2490
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002491 vcpu->arch.sie_block = &sie_page->sie_block;
2492 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2493
David Hildenbrandefed1102015-04-16 12:32:41 +02002494 /* the real guest size will always be smaller than msl */
2495 vcpu->arch.sie_block->mso = 0;
2496 vcpu->arch.sie_block->msl = sclp.hamax;
2497
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002498 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002499 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002500 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002501 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002502 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002503 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002504
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002505 rc = kvm_vcpu_init(vcpu, kvm, id);
2506 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002507 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002508 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002509 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002510 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002511
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002512 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002513out_free_sie_block:
2514 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002515out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002516 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002517out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002518 return ERR_PTR(rc);
2519}
2520
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002521int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2522{
David Hildenbrand9a022062014-08-05 17:40:47 +02002523 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002524}
2525
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002526bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2527{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002528 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002529}
2530
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002531void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002532{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002533 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002534 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002535}
2536
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002537void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002538{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002539 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002540}
2541
Christian Borntraeger8e236542015-04-09 13:49:04 +02002542static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2543{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002544 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002545 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002546}
2547
2548static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2549{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002550 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002551}
2552
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002553/*
2554 * Kick a guest cpu out of SIE and wait until SIE is not running.
2555 * If the CPU is not running (e.g. waiting as idle) the function will
2556 * return immediately. */
2557void exit_sie(struct kvm_vcpu *vcpu)
2558{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002559 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
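	/*
	 * Note (added commentary): prog0c carries PROG_IN_SIE while the
	 * VCPU is executing under SIE; once the stop intercept requested
	 * above takes effect and the flag is cleared on SIE exit, the busy
	 * wait below terminates.
	 */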
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002560 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2561 cpu_relax();
2562}
2563
Christian Borntraeger8e236542015-04-09 13:49:04 +02002564/* Kick a guest cpu out of SIE to process a request synchronously */
2565void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002566{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002567 kvm_make_request(req, vcpu);
2568 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002569}
2570
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002571static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2572 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002573{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002574 struct kvm *kvm = gmap->private;
2575 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002576 unsigned long prefix;
2577 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002578
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002579 if (gmap_is_shadow(gmap))
2580 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002581 if (start >= 1UL << 31)
2582 /* We are only interested in prefix pages */
2583 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002584 kvm_for_each_vcpu(i, vcpu, kvm) {
2585 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002586 prefix = kvm_s390_get_prefix(vcpu);
2587 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2588 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2589 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002590 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002591 }
2592 }
2593}
2594
Christoffer Dallb6d33832012-03-08 16:44:24 -05002595int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2596{
2597 /* kvm common code refers to this, but never calls it */
2598 BUG();
2599 return 0;
2600}
2601
Carsten Otte14eebd92012-05-15 14:15:26 +02002602static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2603 struct kvm_one_reg *reg)
2604{
2605 int r = -EINVAL;
2606
2607 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002608 case KVM_REG_S390_TODPR:
2609 r = put_user(vcpu->arch.sie_block->todpr,
2610 (u32 __user *)reg->addr);
2611 break;
2612 case KVM_REG_S390_EPOCHDIFF:
2613 r = put_user(vcpu->arch.sie_block->epoch,
2614 (u64 __user *)reg->addr);
2615 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002616 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002617 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002618 (u64 __user *)reg->addr);
2619 break;
2620 case KVM_REG_S390_CLOCK_COMP:
2621 r = put_user(vcpu->arch.sie_block->ckc,
2622 (u64 __user *)reg->addr);
2623 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002624 case KVM_REG_S390_PFTOKEN:
2625 r = put_user(vcpu->arch.pfault_token,
2626 (u64 __user *)reg->addr);
2627 break;
2628 case KVM_REG_S390_PFCOMPARE:
2629 r = put_user(vcpu->arch.pfault_compare,
2630 (u64 __user *)reg->addr);
2631 break;
2632 case KVM_REG_S390_PFSELECT:
2633 r = put_user(vcpu->arch.pfault_select,
2634 (u64 __user *)reg->addr);
2635 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002636 case KVM_REG_S390_PP:
2637 r = put_user(vcpu->arch.sie_block->pp,
2638 (u64 __user *)reg->addr);
2639 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002640 case KVM_REG_S390_GBEA:
2641 r = put_user(vcpu->arch.sie_block->gbea,
2642 (u64 __user *)reg->addr);
2643 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002644 default:
2645 break;
2646 }
2647
2648 return r;
2649}
2650
2651static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2652 struct kvm_one_reg *reg)
2653{
2654 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002655 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002656
2657 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002658 case KVM_REG_S390_TODPR:
2659 r = get_user(vcpu->arch.sie_block->todpr,
2660 (u32 __user *)reg->addr);
2661 break;
2662 case KVM_REG_S390_EPOCHDIFF:
2663 r = get_user(vcpu->arch.sie_block->epoch,
2664 (u64 __user *)reg->addr);
2665 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002666 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002667 r = get_user(val, (u64 __user *)reg->addr);
2668 if (!r)
2669 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002670 break;
2671 case KVM_REG_S390_CLOCK_COMP:
2672 r = get_user(vcpu->arch.sie_block->ckc,
2673 (u64 __user *)reg->addr);
2674 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002675 case KVM_REG_S390_PFTOKEN:
2676 r = get_user(vcpu->arch.pfault_token,
2677 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002678 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2679 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002680 break;
2681 case KVM_REG_S390_PFCOMPARE:
2682 r = get_user(vcpu->arch.pfault_compare,
2683 (u64 __user *)reg->addr);
2684 break;
2685 case KVM_REG_S390_PFSELECT:
2686 r = get_user(vcpu->arch.pfault_select,
2687 (u64 __user *)reg->addr);
2688 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002689 case KVM_REG_S390_PP:
2690 r = get_user(vcpu->arch.sie_block->pp,
2691 (u64 __user *)reg->addr);
2692 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002693 case KVM_REG_S390_GBEA:
2694 r = get_user(vcpu->arch.sie_block->gbea,
2695 (u64 __user *)reg->addr);
2696 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002697 default:
2698 break;
2699 }
2700
2701 return r;
2702}
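/*
 * Illustrative userspace sketch (not part of this file; assumes a vCPU file
 * descriptor obtained via KVM_CREATE_VCPU): reading the guest CPU timer
 * through the ONE_REG interface implemented above.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
 *		err(1, "KVM_GET_ONE_REG");
 */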
Christoffer Dallb6d33832012-03-08 16:44:24 -05002703
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002704static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2705{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002706 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002707 return 0;
2708}
2709
2710int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2711{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002712 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002713 return 0;
2714}
2715
2716int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2717{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002718 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002719 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002720 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002721 return 0;
2722}
2723
2724int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2725 struct kvm_sregs *sregs)
2726{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002727 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002728 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002729 return 0;
2730}
2731
2732int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2733 struct kvm_sregs *sregs)
2734{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002735 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002736 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002737 return 0;
2738}
2739
2740int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2741{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002742 if (test_fp_ctl(fpu->fpc))
2743 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002744 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002745 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002746 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2747 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002748 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002749 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002750 return 0;
2751}
2752
2753int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2754{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002755 /* make sure we have the latest values */
2756 save_fpu_regs();
2757 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002758 convert_vx_to_fp((freg_t *) fpu->fprs,
2759 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002760 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002761 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002762 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002763 return 0;
2764}
2765
2766static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2767{
2768 int rc = 0;
2769
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002770 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002771 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002772 else {
2773 vcpu->run->psw_mask = psw.mask;
2774 vcpu->run->psw_addr = psw.addr;
2775 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002776 return rc;
2777}
2778
2779int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2780 struct kvm_translation *tr)
2781{
2782 return -EINVAL; /* not implemented yet */
2783}
2784
David Hildenbrand27291e22014-01-23 12:26:52 +01002785#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2786 KVM_GUESTDBG_USE_HW_BP | \
2787 KVM_GUESTDBG_ENABLE)
2788
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002789int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2790 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002791{
David Hildenbrand27291e22014-01-23 12:26:52 +01002792 int rc = 0;
2793
2794 vcpu->guest_debug = 0;
2795 kvm_s390_clear_bp_data(vcpu);
2796
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002797 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002798 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002799 if (!sclp.has_gpere)
2800 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002801
2802 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2803 vcpu->guest_debug = dbg->control;
2804 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002805 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002806
2807 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2808 rc = kvm_s390_import_bp_data(vcpu, dbg);
2809 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002810 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002811 vcpu->arch.guestdbg.last_bp = 0;
2812 }
2813
2814 if (rc) {
2815 vcpu->guest_debug = 0;
2816 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002817 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002818 }
2819
2820 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002821}
2822
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002823int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2824 struct kvm_mp_state *mp_state)
2825{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002826 /* CHECK_STOP and LOAD are not supported yet */
2827 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2828 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002829}
2830
2831int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2832 struct kvm_mp_state *mp_state)
2833{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002834 int rc = 0;
2835
2836 /* user space knows about this interface - let it control the state */
2837 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2838
2839 switch (mp_state->mp_state) {
2840 case KVM_MP_STATE_STOPPED:
2841 kvm_s390_vcpu_stop(vcpu);
2842 break;
2843 case KVM_MP_STATE_OPERATING:
2844 kvm_s390_vcpu_start(vcpu);
2845 break;
2846 case KVM_MP_STATE_LOAD:
2847 case KVM_MP_STATE_CHECK_STOP:
2848 /* fall through - CHECK_STOP and LOAD are not supported yet */
2849 default:
2850 rc = -ENXIO;
2851 }
2852
2853 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002854}
2855
David Hildenbrand8ad35752014-03-14 11:00:21 +01002856static bool ibs_enabled(struct kvm_vcpu *vcpu)
2857{
2858 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2859}
2860
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002861static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2862{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002863retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002864 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02002865 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002866 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002867 /*
2868 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002869 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002870 * This ensures that the ipte instruction for this request has
2871 * already finished. We might race against a second unmapper that
2872 * wants to set the blocking bit. Let's just retry the request loop.
2873 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002874 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002875 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002876 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2877 kvm_s390_get_prefix(vcpu),
2878 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002879 if (rc) {
2880 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002881 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002882 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002883 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002884 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002885
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002886 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2887 vcpu->arch.sie_block->ihcpu = 0xffff;
2888 goto retry;
2889 }
2890
David Hildenbrand8ad35752014-03-14 11:00:21 +01002891 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2892 if (!ibs_enabled(vcpu)) {
2893 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002894 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002895 &vcpu->arch.sie_block->cpuflags);
2896 }
2897 goto retry;
2898 }
2899
2900 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2901 if (ibs_enabled(vcpu)) {
2902 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002903 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002904 &vcpu->arch.sie_block->cpuflags);
2905 }
2906 goto retry;
2907 }
2908
David Hildenbrand6502a342016-06-21 14:19:51 +02002909 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2910 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2911 goto retry;
2912 }
2913
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002914 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2915 /*
2916 * Disable CMMA virtualization; we will emulate the ESSA
2917 * instruction manually, in order to provide additional
2918 * functionalities needed for live migration.
2919 */
2920 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2921 goto retry;
2922 }
2923
2924 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2925 /*
2926 * Re-enable CMMA virtualization if CMMA is available and
2927 * was used.
2928 */
2929 if ((vcpu->kvm->arch.use_cmma) &&
2930 (vcpu->kvm->mm->context.use_cmma))
2931 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2932 goto retry;
2933 }
2934
David Hildenbrand0759d062014-05-13 16:54:32 +02002935 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002936 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002937
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002938 return 0;
2939}
2940
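/*
 * Added commentary: the guest epoch is the value added to the host TOD to
 * obtain the guest TOD, computed as gtod->tod - htod.tod in modulo-2^64
 * arithmetic.  If that subtraction wraps (the result is larger than
 * gtod->tod, which happens exactly when the host TOD is ahead of the
 * requested guest TOD), one is borrowed from the epoch-index difference so
 * that the combined (epoch index, TOD) value stays consistent.
 * Illustrative numbers: gtod->tod = 0x10 and htod.tod = 0x20 give
 * epoch = 0xfffffffffffffff0 and epdx = gtod->epoch_idx - htod.epoch_idx - 1.
 */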
Collin L. Walling8fa16962016-07-26 15:29:44 -04002941void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
2942 const struct kvm_s390_vm_tod_clock *gtod)
2943{
2944 struct kvm_vcpu *vcpu;
2945 struct kvm_s390_tod_clock_ext htod;
2946 int i;
2947
2948 mutex_lock(&kvm->lock);
2949 preempt_disable();
2950
2951 get_tod_clock_ext((char *)&htod);
2952
2953 kvm->arch.epoch = gtod->tod - htod.tod;
2954 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
2955
2956 if (kvm->arch.epoch > gtod->tod)
2957 kvm->arch.epdx -= 1;
2958
2959 kvm_s390_vcpu_block_all(kvm);
2960 kvm_for_each_vcpu(i, vcpu, kvm) {
2961 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2962 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
2963 }
2964
2965 kvm_s390_vcpu_unblock_all(kvm);
2966 preempt_enable();
2967 mutex_unlock(&kvm->lock);
2968}
2969
David Hildenbrand25ed1672015-05-12 09:49:14 +02002970void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2971{
2972 struct kvm_vcpu *vcpu;
2973 int i;
2974
2975 mutex_lock(&kvm->lock);
2976 preempt_disable();
2977 kvm->arch.epoch = tod - get_tod_clock();
2978 kvm_s390_vcpu_block_all(kvm);
2979 kvm_for_each_vcpu(i, vcpu, kvm)
2980 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2981 kvm_s390_vcpu_unblock_all(kvm);
2982 preempt_enable();
2983 mutex_unlock(&kvm->lock);
2984}
2985
Thomas Huthfa576c52014-05-06 17:20:16 +02002986/**
2987 * kvm_arch_fault_in_page - fault-in guest page if necessary
2988 * @vcpu: The corresponding virtual cpu
2989 * @gpa: Guest physical address
2990 * @writable: Whether the page should be writable or not
2991 *
2992 * Make sure that a guest page has been faulted-in on the host.
2993 *
2994 * Return: Zero on success, negative error code otherwise.
2995 */
2996long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002997{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002998 return gmap_fault(vcpu->arch.gmap, gpa,
2999 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003000}
3001
Dominik Dingel3c038e62013-10-07 17:11:48 +02003002static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3003 unsigned long token)
3004{
3005 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003006 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003007
3008 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003009 irq.u.ext.ext_params2 = token;
3010 irq.type = KVM_S390_INT_PFAULT_INIT;
3011 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003012 } else {
3013 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003014 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003015 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3016 }
3017}
3018
3019void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3020 struct kvm_async_pf *work)
3021{
3022 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3023 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3024}
3025
3026void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3027 struct kvm_async_pf *work)
3028{
3029 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3030 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3031}
3032
3033void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3034 struct kvm_async_pf *work)
3035{
3036 /* s390 will always inject the page directly */
3037}
3038
3039bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3040{
3041 /*
3042 * s390 will always inject the page directly,
3043 * but we still want check_async_completion to clean up
3044 */
3045 return true;
3046}
3047
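/*
 * Added commentary: decide whether the current host fault should become an
 * asynchronous "pfault init" notification for the guest.  This only happens
 * when the guest has armed the pfault interface (valid token), the current
 * PSW matches the configured pfault select/compare mask, external
 * interrupts (and the relevant subclass bit in CR0) are enabled, no
 * interrupt is already pending, and the gmap has pfault enabled; otherwise
 * the caller resolves the fault synchronously.
 */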
3048static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3049{
3050 hva_t hva;
3051 struct kvm_arch_async_pf arch;
3052 int rc;
3053
3054 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3055 return 0;
3056 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3057 vcpu->arch.pfault_compare)
3058 return 0;
3059 if (psw_extint_disabled(vcpu))
3060 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003061 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003062 return 0;
3063 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
3064 return 0;
3065 if (!vcpu->arch.gmap->pfault_enabled)
3066 return 0;
3067
Heiko Carstens81480cc2014-01-01 16:36:07 +01003068 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3069 hva += current->thread.gmap_addr & ~PAGE_MASK;
3070 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003071 return 0;
3072
3073 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3074 return rc;
3075}
3076
Thomas Huth3fb4c402013-09-12 10:33:43 +02003077static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003078{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003079 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003080
Dominik Dingel3c038e62013-10-07 17:11:48 +02003081 /*
3082 * On s390 notifications for arriving pages will be delivered directly
3083 * to the guest but the housekeeping for completed pfaults is
3084 * handled outside the worker.
3085 */
3086 kvm_check_async_pf_completion(vcpu);
3087
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003088 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3089 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003090
3091 if (need_resched())
3092 schedule();
3093
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003094 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003095 s390_handle_mcck();
3096
Jens Freimann79395032014-04-17 10:10:30 +02003097 if (!kvm_is_ucontrol(vcpu->kvm)) {
3098 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3099 if (rc)
3100 return rc;
3101 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003102
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003103 rc = kvm_s390_handle_requests(vcpu);
3104 if (rc)
3105 return rc;
3106
David Hildenbrand27291e22014-01-23 12:26:52 +01003107 if (guestdbg_enabled(vcpu)) {
3108 kvm_s390_backup_guest_per_regs(vcpu);
3109 kvm_s390_patch_guest_per_regs(vcpu);
3110 }
3111
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003112 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003113 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3114 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3115 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003116
Thomas Huth3fb4c402013-09-12 10:33:43 +02003117 return 0;
3118}
3119
Thomas Huth492d8642015-02-10 16:11:01 +01003120static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3121{
David Hildenbrand56317922016-01-12 17:37:58 +01003122 struct kvm_s390_pgm_info pgm_info = {
3123 .code = PGM_ADDRESSING,
3124 };
3125 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003126 int rc;
3127
3128 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3129 trace_kvm_s390_sie_fault(vcpu);
3130
3131 /*
3132 * We want to inject an addressing exception, which is defined as a
3133 * suppressing or terminating exception. However, since we came here
3134 * by a DAT access exception, the PSW still points to the faulting
3135 * instruction since DAT exceptions are nullifying. So we've got
3136 * to look up the current opcode to get the length of the instruction
3137 * to be able to forward the PSW.
3138 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003139 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003140 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003141 if (rc < 0) {
3142 return rc;
3143 } else if (rc) {
3144 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3145 * Forward by arbitrary ilc, injection will take care of
3146 * nullification if necessary.
3147 */
3148 pgm_info = vcpu->arch.pgm;
3149 ilen = 4;
3150 }
David Hildenbrand56317922016-01-12 17:37:58 +01003151 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3152 kvm_s390_forward_psw(vcpu, ilen);
3153 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003154}
3155
Thomas Huth3fb4c402013-09-12 10:33:43 +02003156static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3157{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003158 struct mcck_volatile_info *mcck_info;
3159 struct sie_page *sie_page;
3160
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003161 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3162 vcpu->arch.sie_block->icptcode);
3163 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3164
David Hildenbrand27291e22014-01-23 12:26:52 +01003165 if (guestdbg_enabled(vcpu))
3166 kvm_s390_restore_guest_per_regs(vcpu);
3167
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003168 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3169 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003170
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003171 if (exit_reason == -EINTR) {
3172 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3173 sie_page = container_of(vcpu->arch.sie_block,
3174 struct sie_page, sie_block);
3175 mcck_info = &sie_page->mcck_info;
3176 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3177 return 0;
3178 }
3179
David Hildenbrand71f116b2015-10-19 16:24:28 +02003180 if (vcpu->arch.sie_block->icptcode > 0) {
3181 int rc = kvm_handle_sie_intercept(vcpu);
3182
3183 if (rc != -EOPNOTSUPP)
3184 return rc;
3185 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3186 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3187 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3188 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3189 return -EREMOTE;
3190 } else if (exit_reason != -EFAULT) {
3191 vcpu->stat.exit_null++;
3192 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003193 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3194 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3195 vcpu->run->s390_ucontrol.trans_exc_code =
3196 current->thread.gmap_addr;
3197 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003198 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003199 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003200 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003201 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003202 if (kvm_arch_setup_async_pf(vcpu))
3203 return 0;
3204 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003205 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003206 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003207}
3208
3209static int __vcpu_run(struct kvm_vcpu *vcpu)
3210{
3211 int rc, exit_reason;
3212
Thomas Huth800c1062013-09-12 10:33:45 +02003213 /*
3214 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3215 * ning the guest), so that memslots (and other stuff) are protected
3216 */
3217 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3218
Thomas Hutha76ccff2013-09-12 10:33:44 +02003219 do {
3220 rc = vcpu_pre_run(vcpu);
3221 if (rc)
3222 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003223
Thomas Huth800c1062013-09-12 10:33:45 +02003224 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003225 /*
3226 * As PF_VCPU will be used in the fault handler, there must be
3227 * no uaccess between guest_enter and guest_exit.
3228 */
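		/*
		 * Added commentary: interrupts stay disabled across
		 * guest_enter_irqoff() and the CPU-timer accounting switch
		 * so that context tracking and time accounting observe a
		 * consistent state; they are re-enabled right before
		 * sie64a() runs the guest, and the same dance is done in
		 * reverse on exit.
		 */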
Christian Borntraeger0097d122015-04-30 13:43:30 +02003229 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003230 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003231 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003232 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003233 exit_reason = sie64a(vcpu->arch.sie_block,
3234 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003235 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003236 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003237 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003238 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003239 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003240
Thomas Hutha76ccff2013-09-12 10:33:44 +02003241 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003242 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003243
Thomas Huth800c1062013-09-12 10:33:45 +02003244 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003245 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003246}
3247
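/*
 * sync_regs()/store_regs() bracket every KVM_RUN: sync_regs() copies the
 * register state userspace may have modified (as flagged in
 * kvm_run->kvm_dirty_regs) into the SIE block and the lazy FPU/access/GS
 * register machinery, saving the corresponding host state; store_regs()
 * performs the inverse copy back into kvm_run once the run loop ends.
 */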
David Hildenbrandb028ee32014-07-17 10:47:43 +02003248static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3249{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003250 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003251 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003252
3253 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003254 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003255 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3256 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3257 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3258 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3259 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3260 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003261 /* some control register changes require a tlb flush */
3262 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003263 }
3264 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003265 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003266 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3267 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3268 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3269 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3270 }
3271 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3272 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3273 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3274 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003275 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3276 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003277 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003278 /*
3279 * If userspace sets the riccb (e.g. after migration) to a valid state,
3280 * we should enable RI here instead of doing the lazy enablement.
3281 */
3282 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003283 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003284 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003285 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003286 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003287 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003288 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003289 /*
3290 * If userspace sets the gscb (e.g. after migration) to non-zero,
3291 * we should enable GS here instead of doing the lazy enablement.
3292 */
3293 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3294 test_kvm_facility(vcpu->kvm, 133) &&
3295 gscb->gssm &&
3296 !vcpu->arch.gs_enabled) {
3297 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3298 vcpu->arch.sie_block->ecb |= ECB_GS;
3299 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3300 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003301 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003302 save_access_regs(vcpu->arch.host_acrs);
3303 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003304 /* save host (userspace) fprs/vrs */
3305 save_fpu_regs();
3306 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3307 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3308 if (MACHINE_HAS_VX)
3309 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3310 else
3311 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3312 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3313 if (test_fp_ctl(current->thread.fpu.fpc))
3314 /* User space provided an invalid FPC, let's clear it */
3315 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003316 if (MACHINE_HAS_GS) {
3317 preempt_disable();
3318 __ctl_set_bit(2, 4);
3319 if (current->thread.gs_cb) {
3320 vcpu->arch.host_gscb = current->thread.gs_cb;
3321 save_gs_cb(vcpu->arch.host_gscb);
3322 }
3323 if (vcpu->arch.gs_enabled) {
3324 current->thread.gs_cb = (struct gs_cb *)
3325 &vcpu->run->s.regs.gscb;
3326 restore_gs_cb(current->thread.gs_cb);
3327 }
3328 preempt_enable();
3329 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003330
David Hildenbrandb028ee32014-07-17 10:47:43 +02003331 kvm_run->kvm_dirty_regs = 0;
3332}
3333
3334static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3335{
3336 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3337 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3338 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3339 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003340 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003341 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3342 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3343 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3344 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3345 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3346 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3347 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003348 save_access_regs(vcpu->run->s.regs.acrs);
3349 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003350 /* Save guest register state */
3351 save_fpu_regs();
3352 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3353 /* Restore will be done lazily at return */
3354 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3355 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003356 if (MACHINE_HAS_GS) {
3357 __ctl_set_bit(2, 4);
3358 if (vcpu->arch.gs_enabled)
3359 save_gs_cb(current->thread.gs_cb);
3360 preempt_disable();
3361 current->thread.gs_cb = vcpu->arch.host_gscb;
3362 restore_gs_cb(vcpu->arch.host_gscb);
3363 preempt_enable();
3364 if (!vcpu->arch.host_gscb)
3365 __ctl_clear_bit(2, 4);
3366 vcpu->arch.host_gscb = NULL;
3367 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003368
David Hildenbrandb028ee32014-07-17 10:47:43 +02003369}
3370
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003371int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3372{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003373 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003374
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003375 if (kvm_run->immediate_exit)
3376 return -EINTR;
3377
Christoffer Dallaccb7572017-12-04 21:35:25 +01003378 vcpu_load(vcpu);
3379
David Hildenbrand27291e22014-01-23 12:26:52 +01003380 if (guestdbg_exit_pending(vcpu)) {
3381 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003382 rc = 0;
3383 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01003384 }
3385
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003386 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003387
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003388 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3389 kvm_s390_vcpu_start(vcpu);
3390 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003391 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003392 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003393 rc = -EINVAL;
3394 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003395 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003396
David Hildenbrandb028ee32014-07-17 10:47:43 +02003397 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003398 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003399
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003400 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003401 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003402
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003403 if (signal_pending(current) && !rc) {
3404 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003405 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003406 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003407
David Hildenbrand27291e22014-01-23 12:26:52 +01003408 if (guestdbg_exit_pending(vcpu) && !rc) {
3409 kvm_s390_prepare_debug_exit(vcpu);
3410 rc = 0;
3411 }
3412
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003413 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003414 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003415 rc = 0;
3416 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003417
David Hildenbranddb0758b2016-02-15 09:42:25 +01003418 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003419 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003420
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003421 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003422
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003423 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01003424out:
3425 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003426 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003427}
3428
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003429/*
3430 * store status at address
3431 * we have two special cases:
3432 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3433 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3434 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003435int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003436{
Carsten Otte092670c2011-07-24 10:48:22 +02003437 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003438 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003439 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003440 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003441 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003442
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003443 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003444 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3445 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003446 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003447 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003448 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3449 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003450 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003451 gpa = px;
3452 } else
3453 gpa -= __LC_FPREGS_SAVE_AREA;
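	/*
	 * Added commentary: after the adjustment above, gpa plus the
	 * architected __LC_xxx_SAVE_AREA offsets yields the destination of
	 * every field written below: absolute lowcore for NOADDR, the
	 * prefix area for PREFIXED, and the caller-supplied address (which
	 * points at the FPR save area) otherwise.
	 */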
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003454
3455 /* manually convert vector registers if necessary */
3456 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003457 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003458 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3459 fprs, 128);
3460 } else {
3461 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003462 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003463 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003464 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003465 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003466 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003467 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003468 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003469 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003470 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003471 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003472 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003473 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003474 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003475 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003476 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003477 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003478 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003479 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003480 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003481 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003482 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003483 &vcpu->arch.sie_block->gcr, 128);
3484 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003485}
3486
Thomas Huthe8798922013-11-06 15:46:33 +01003487int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3488{
3489 /*
3490 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003491 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003492 * them into the save area
3493 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003494 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003495 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003496 save_access_regs(vcpu->run->s.regs.acrs);
3497
3498 return kvm_s390_store_status_unloaded(vcpu, addr);
3499}
3500
David Hildenbrand8ad35752014-03-14 11:00:21 +01003501static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3502{
3503 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003504 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003505}
3506
3507static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3508{
3509 unsigned int i;
3510 struct kvm_vcpu *vcpu;
3511
3512 kvm_for_each_vcpu(i, vcpu, kvm) {
3513 __disable_ibs_on_vcpu(vcpu);
3514 }
3515}
3516
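/*
 * IBS is used as a single-VCPU speed-up: it is only kept enabled while
 * exactly one VCPU is in the started state.  kvm_s390_vcpu_start() below
 * disables IBS on all VCPUs as soon as a second VCPU starts, and
 * kvm_s390_vcpu_stop() re-enables it on the last remaining started VCPU.
 */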
3517static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3518{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003519 if (!sclp.has_ibs)
3520 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003521 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003522 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003523}
3524
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003525void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3526{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003527 int i, online_vcpus, started_vcpus = 0;
3528
3529 if (!is_vcpu_stopped(vcpu))
3530 return;
3531
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003532 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003533 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003534 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003535 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3536
3537 for (i = 0; i < online_vcpus; i++) {
3538 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3539 started_vcpus++;
3540 }
3541
3542 if (started_vcpus == 0) {
3543 /* we're the only active VCPU -> speed it up */
3544 __enable_ibs_on_vcpu(vcpu);
3545 } else if (started_vcpus == 1) {
3546 /*
3547 * As we are starting a second VCPU, we have to disable
3548 * the IBS facility on all VCPUs to remove potentially
3549 * oustanding ENABLE requests.
3550 */
3551 __disable_ibs_on_all_vcpus(vcpu->kvm);
3552 }
3553
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003554 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003555 /*
3556 * Another VCPU might have used IBS while we were offline.
3557 * Let's play safe and flush the VCPU at startup.
3558 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003559 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003560 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003561 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003562}
3563
3564void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3565{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003566 int i, online_vcpus, started_vcpus = 0;
3567 struct kvm_vcpu *started_vcpu = NULL;
3568
3569 if (is_vcpu_stopped(vcpu))
3570 return;
3571
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003572 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003573 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003574 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003575 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3576
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003577 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003578 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003579
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003580 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003581 __disable_ibs_on_vcpu(vcpu);
3582
3583 for (i = 0; i < online_vcpus; i++) {
3584 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3585 started_vcpus++;
3586 started_vcpu = vcpu->kvm->vcpus[i];
3587 }
3588 }
3589
3590 if (started_vcpus == 1) {
3591 /*
3592 * As we only have one VCPU left, we want to enable the
3593 * IBS facility for that VCPU to speed it up.
3594 */
3595 __enable_ibs_on_vcpu(started_vcpu);
3596 }
3597
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003598 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003599 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003600}
3601
Cornelia Huckd6712df2012-12-20 15:32:11 +01003602static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3603 struct kvm_enable_cap *cap)
3604{
3605 int r;
3606
3607 if (cap->flags)
3608 return -EINVAL;
3609
3610 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003611 case KVM_CAP_S390_CSS_SUPPORT:
3612 if (!vcpu->kvm->arch.css_support) {
3613 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003614 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003615 trace_kvm_s390_enable_css(vcpu->kvm);
3616 }
3617 r = 0;
3618 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003619 default:
3620 r = -EINVAL;
3621 break;
3622 }
3623 return r;
3624}
3625
Thomas Huth41408c282015-02-06 15:01:21 +01003626static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3627 struct kvm_s390_mem_op *mop)
3628{
3629 void __user *uaddr = (void __user *)mop->buf;
3630 void *tmpbuf = NULL;
3631 int r, srcu_idx;
3632 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3633 | KVM_S390_MEMOP_F_CHECK_ONLY;
3634
3635 if (mop->flags & ~supported_flags)
3636 return -EINVAL;
3637
3638 if (mop->size > MEM_OP_MAX_SIZE)
3639 return -E2BIG;
3640
3641 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3642 tmpbuf = vmalloc(mop->size);
3643 if (!tmpbuf)
3644 return -ENOMEM;
3645 }
3646
3647 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3648
3649 switch (mop->op) {
3650 case KVM_S390_MEMOP_LOGICAL_READ:
3651 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003652 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3653 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003654 break;
3655 }
3656 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3657 if (r == 0) {
3658 if (copy_to_user(uaddr, tmpbuf, mop->size))
3659 r = -EFAULT;
3660 }
3661 break;
3662 case KVM_S390_MEMOP_LOGICAL_WRITE:
3663 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003664 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3665 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003666 break;
3667 }
3668 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3669 r = -EFAULT;
3670 break;
3671 }
3672 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3673 break;
3674 default:
3675 r = -EINVAL;
3676 }
3677
3678 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3679
3680 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3681 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3682
3683 vfree(tmpbuf);
3684 return r;
3685}
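/*
 * Illustrative userspace sketch (not part of this file; assumes vcpu_fd and
 * a suitably sized buffer exist): reading 256 bytes from guest logical
 * address 0x1000 with the memop interface implemented above.
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x1000,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buffer,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 */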
3686
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003687long kvm_arch_vcpu_ioctl(struct file *filp,
3688 unsigned int ioctl, unsigned long arg)
3689{
3690 struct kvm_vcpu *vcpu = filp->private_data;
3691 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003692 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003693 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003694
Avi Kivity93736622010-05-13 12:35:17 +03003695 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003696 case KVM_S390_IRQ: {
3697 struct kvm_s390_irq s390irq;
3698
3699 r = -EFAULT;
3700 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3701 break;
3702 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3703 break;
3704 }
Avi Kivity93736622010-05-13 12:35:17 +03003705 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003706 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003707 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003708
Avi Kivity93736622010-05-13 12:35:17 +03003709 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003710 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003711 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003712 if (s390int_to_s390irq(&s390int, &s390irq))
3713 return -EINVAL;
3714 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003715 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003716 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003717 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003718 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003719 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003720 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003721 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003722 case KVM_S390_SET_INITIAL_PSW: {
3723 psw_t psw;
3724
Avi Kivitybc923cc2010-05-13 12:21:46 +03003725 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003726 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003727 break;
3728 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3729 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003730 }
3731 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003732 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3733 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003734 case KVM_SET_ONE_REG:
3735 case KVM_GET_ONE_REG: {
3736 struct kvm_one_reg reg;
3737 r = -EFAULT;
3738 if (copy_from_user(&reg, argp, sizeof(reg)))
3739 break;
3740 if (ioctl == KVM_SET_ONE_REG)
3741 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3742 else
3743 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3744 break;
3745 }
Carsten Otte27e03932012-01-04 10:25:21 +01003746#ifdef CONFIG_KVM_S390_UCONTROL
3747 case KVM_S390_UCAS_MAP: {
3748 struct kvm_s390_ucas_mapping ucasmap;
3749
3750 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3751 r = -EFAULT;
3752 break;
3753 }
3754
3755 if (!kvm_is_ucontrol(vcpu->kvm)) {
3756 r = -EINVAL;
3757 break;
3758 }
3759
3760 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3761 ucasmap.vcpu_addr, ucasmap.length);
3762 break;
3763 }
3764 case KVM_S390_UCAS_UNMAP: {
3765 struct kvm_s390_ucas_mapping ucasmap;
3766
3767 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3768 r = -EFAULT;
3769 break;
3770 }
3771
3772 if (!kvm_is_ucontrol(vcpu->kvm)) {
3773 r = -EINVAL;
3774 break;
3775 }
3776
3777 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3778 ucasmap.length);
3779 break;
3780 }
3781#endif
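	/* Resolve a fault for the given guest address in the vcpu's gmap. */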
Carsten Otteccc79102012-01-04 10:25:26 +01003782 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003783 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003784 break;
3785 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003786 case KVM_ENABLE_CAP:
3787 {
3788 struct kvm_enable_cap cap;
3789 r = -EFAULT;
3790 if (copy_from_user(&cap, argp, sizeof(cap)))
3791 break;
3792 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3793 break;
3794 }
Thomas Huth41408c282015-02-06 15:01:21 +01003795 case KVM_S390_MEM_OP: {
3796 struct kvm_s390_mem_op mem_op;
3797
3798 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3799 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3800 else
3801 r = -EFAULT;
3802 break;
3803 }
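	/* The next two ioctls transfer the vcpu's pending local interrupts as
	 * a flat buffer, e.g. so user space can migrate them. */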
Jens Freimann816c7662014-11-24 17:13:46 +01003804 case KVM_S390_SET_IRQ_STATE: {
3805 struct kvm_s390_irq_state irq_state;
3806
3807 r = -EFAULT;
3808 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3809 break;
3810 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3811 irq_state.len == 0 ||
3812 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3813 r = -EINVAL;
3814 break;
3815 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003816 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003817 r = kvm_s390_set_irq_state(vcpu,
3818 (void __user *) irq_state.buf,
3819 irq_state.len);
3820 break;
3821 }
3822 case KVM_S390_GET_IRQ_STATE: {
3823 struct kvm_s390_irq_state irq_state;
3824
3825 r = -EFAULT;
3826 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3827 break;
3828 if (irq_state.len == 0) {
3829 r = -EINVAL;
3830 break;
3831 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003832 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003833 r = kvm_s390_get_irq_state(vcpu,
3834 (__u8 __user *) irq_state.buf,
3835 irq_state.len);
3836 break;
3837 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003838 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003839 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003840 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003841 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003842}
3843
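/*
 * mmap handler for the vcpu fd: for user-controlled VMs the SIE control
 * block is exposed to user space; everything else gets SIGBUS.
 */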
Carsten Otte5b1c1492012-01-04 10:25:23 +01003844int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3845{
3846#ifdef CONFIG_KVM_S390_UCONTROL
3847 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3848 && (kvm_is_ucontrol(vcpu->kvm))) {
3849 vmf->page = virt_to_page(vcpu->arch.sie_block);
3850 get_page(vmf->page);
3851 return 0;
3852 }
3853#endif
3854 return VM_FAULT_SIGBUS;
3855}
3856
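/* No per-memslot data needs to be allocated on s390. */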
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303857int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3858 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003859{
3860 return 0;
3861}
3862
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003863/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003864int kvm_arch_prepare_memory_region(struct kvm *kvm,
3865 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003866 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003867 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003868{
Nick Wangdd2887e2013-03-25 17:22:57 +01003869	/* A few sanity checks. Memory slots have to start and end on a segment
3870	   boundary (1MB). The memory in userland may be fragmented across
3871	   several different vmas, and it is fine to mmap() and munmap() parts
3872	   of this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003873
Carsten Otte598841c2011-07-24 10:48:21 +02003874 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003875 return -EINVAL;
3876
Carsten Otte598841c2011-07-24 10:48:21 +02003877 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003878 return -EINVAL;
3879
Dominik Dingela3a92c32014-12-01 17:24:42 +01003880 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3881 return -EINVAL;
3882
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003883 return 0;
3884}
3885
3886void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003887 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003888 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003889 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003890 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003891{
Carsten Ottef7850c92011-07-24 10:48:23 +02003892 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003893
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003894 /* If the basics of the memslot do not change, we do not want
3895 * to update the gmap. Every update causes several unnecessary
3896 * segment translation exceptions. This is usually handled just
3897 * fine by the normal fault handler + gmap, but it will also
3898 * cause faults on the prefix page of running guest CPUs.
3899 */
3900 if (old->userspace_addr == mem->userspace_addr &&
3901 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3902 old->npages * PAGE_SIZE == mem->memory_size)
3903 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003904
3905 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3906 mem->guest_phys_addr, mem->memory_size);
3907 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003908 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003909 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003910}
3911
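/*
 * For facility-list doubleword @i, build a mask of the facility bits that
 * are not hypervisor managed, based on the 2-bit field the SCLP reports
 * for that doubleword in sclp.hmfai.
 */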
Alexander Yarygin60a37702016-04-01 15:38:57 +03003912static inline unsigned long nonhyp_mask(int i)
3913{
3914 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3915
3916 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3917}
3918
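/* Called once a blocked vcpu stops waiting: reset the valid_wakeup flag. */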
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003919void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3920{
3921 vcpu->valid_wakeup = false;
3922}
3923
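/*
 * Module init: refuse to load when the SIE hardware virtualization
 * assist is not available.
 */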
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003924static int __init kvm_s390_init(void)
3925{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003926 int i;
3927
David Hildenbrand07197fd2015-01-30 16:01:38 +01003928 if (!sclp.has_sief2) {
3929 pr_info("SIE not available\n");
3930 return -ENODEV;
3931 }
3932
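	/* Add the host's non-hypervisor-managed facility bits to the
	 * facility mask offered to guests. */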
Alexander Yarygin60a37702016-04-01 15:38:57 +03003933 for (i = 0; i < 16; i++)
3934 kvm_s390_fac_list_mask[i] |=
3935 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3936
Michael Mueller9d8d5782015-02-02 15:42:51 +01003937 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003938}
3939
3940static void __exit kvm_s390_exit(void)
3941{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003942 kvm_exit();
3943}
3944
3945module_init(kvm_s390_init);
3946module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003947
3948/*
3949 * Enable autoloading of the kvm module.
3950 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3951 * since x86 takes a different approach.
3952 */
3953#include <linux/miscdevice.h>
3954MODULE_ALIAS_MISCDEV(KVM_MINOR);
3955MODULE_ALIAS("devname:kvm");