/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <linux/string.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
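/*
 * Worst-case buffer size for KVM_S390_{GET,SET}_IRQ_STATE: each possible
 * vcpu may have an emergency signal pending, plus one entry for every
 * other local interrupt type.
 */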
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

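/*
 * Query availability of a PERFORM LOCKED OPERATION function code: with
 * bit 0x100 set in r0, PLO only performs the "test bit" function and
 * sets cc 0 if the function code in r0 is available.
 */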
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
	int i;

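	/*
	 * Probe all 256 PLO function codes; available functions are
	 * recorded MSB-first in the plo bit array (bit i in byte i >> 3).
	 */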
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

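/*
 * Transfer the per-page dirty state from the gmap into the memslot's
 * dirty bitmap. The walk is preemptible and aborts on fatal signals.
 */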
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

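/*
 * Request operation exception interception on every vcpu, e.g. after
 * user space enabled KVM_CAP_S390_USER_INSTR0.
 */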
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			kvm->arch.float_int.ais_enabled = 1;
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

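/*
 * Enabling AES/DEA key wrapping installs a fresh random wrapping key
 * mask in the CRYCB; disabling clears it. All vcpus are kicked out of
 * SIE afterwards so that they pick up the new crypto configuration.
 */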
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

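/* Post a synchronous request to all vcpus of this VM. */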
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

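/* Only a zero value for the high word of the guest TOD is supported so far. */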
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

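/*
 * Set the guest CPU model (cpuid, ibc, facility list). Only possible
 * before the first vcpu is created; the requested ibc is clamped to the
 * machine's lowest/unblocked ibc range reported via sclp.ibc.
 */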
Michael Mueller658b6ed2015-02-02 15:49:35 +0100970static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
971{
972 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +0200973 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100974 int ret = 0;
975
976 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200977 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +0100978 ret = -EBUSY;
979 goto out;
980 }
981 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
982 if (!proc) {
983 ret = -ENOMEM;
984 goto out;
985 }
986 if (!copy_from_user(proc, (void __user *)attr->addr,
987 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +0200988 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +0200989 lowest_ibc = sclp.ibc >> 16 & 0xfff;
990 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +0200991 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +0200992 if (proc->ibc > unblocked_ibc)
993 kvm->arch.model.ibc = unblocked_ibc;
994 else if (proc->ibc < lowest_ibc)
995 kvm->arch.model.ibc = lowest_ibc;
996 else
997 kvm->arch.model.ibc = proc->ibc;
998 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100999 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001000 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001001 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1002 kvm->arch.model.ibc,
1003 kvm->arch.model.cpuid);
1004 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1005 kvm->arch.model.fac_list[0],
1006 kvm->arch.model.fac_list[1],
1007 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001008 } else
1009 ret = -EFAULT;
1010 kfree(proc);
1011out:
1012 mutex_unlock(&kvm->lock);
1013 return ret;
1014}
1015
David Hildenbrand15c97052015-03-19 17:36:43 +01001016static int kvm_s390_set_processor_feat(struct kvm *kvm,
1017 struct kvm_device_attr *attr)
1018{
1019 struct kvm_s390_vm_cpu_feat data;
1020 int ret = -EBUSY;
1021
1022 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1023 return -EFAULT;
1024 if (!bitmap_subset((unsigned long *) data.feat,
1025 kvm_s390_available_cpu_feat,
1026 KVM_S390_VM_CPU_FEAT_NR_BITS))
1027 return -EINVAL;
1028
1029 mutex_lock(&kvm->lock);
1030 if (!atomic_read(&kvm->online_vcpus)) {
1031 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1032 KVM_S390_VM_CPU_FEAT_NR_BITS);
1033 ret = 0;
1034 }
1035 mutex_unlock(&kvm->lock);
1036 return ret;
1037}
1038
David Hildenbrand0a763c72016-05-18 16:03:47 +02001039static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1040 struct kvm_device_attr *attr)
1041{
1042 /*
1043 * Once supported by kernel + hw, we have to store the subfunctions
1044 * in kvm->arch and remember that user space configured them.
1045 */
1046 return -ENXIO;
1047}
1048
Michael Mueller658b6ed2015-02-02 15:49:35 +01001049static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1050{
1051 int ret = -ENXIO;
1052
1053 switch (attr->attr) {
1054 case KVM_S390_VM_CPU_PROCESSOR:
1055 ret = kvm_s390_set_processor(kvm, attr);
1056 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001057 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1058 ret = kvm_s390_set_processor_feat(kvm, attr);
1059 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001060 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1061 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1062 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001063 }
1064 return ret;
1065}
1066
1067static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1068{
1069 struct kvm_s390_vm_cpu_processor *proc;
1070 int ret = 0;
1071
1072 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1073 if (!proc) {
1074 ret = -ENOMEM;
1075 goto out;
1076 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001077 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001078 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001079 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1080 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001081 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1082 kvm->arch.model.ibc,
1083 kvm->arch.model.cpuid);
1084 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1085 kvm->arch.model.fac_list[0],
1086 kvm->arch.model.fac_list[1],
1087 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001088 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1089 ret = -EFAULT;
1090 kfree(proc);
1091out:
1092 return ret;
1093}
1094
1095static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1096{
1097 struct kvm_s390_vm_cpu_machine *mach;
1098 int ret = 0;
1099
1100 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1101 if (!mach) {
1102 ret = -ENOMEM;
1103 goto out;
1104 }
1105 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001106 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001107 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001108 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001109 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001110 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001111 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1112 kvm->arch.model.ibc,
1113 kvm->arch.model.cpuid);
1114 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1115 mach->fac_mask[0],
1116 mach->fac_mask[1],
1117 mach->fac_mask[2]);
1118 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1119 mach->fac_list[0],
1120 mach->fac_list[1],
1121 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001122 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1123 ret = -EFAULT;
1124 kfree(mach);
1125out:
1126 return ret;
1127}
1128
David Hildenbrand15c97052015-03-19 17:36:43 +01001129static int kvm_s390_get_processor_feat(struct kvm *kvm,
1130 struct kvm_device_attr *attr)
1131{
1132 struct kvm_s390_vm_cpu_feat data;
1133
1134 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1135 KVM_S390_VM_CPU_FEAT_NR_BITS);
1136 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1137 return -EFAULT;
1138 return 0;
1139}
1140
1141static int kvm_s390_get_machine_feat(struct kvm *kvm,
1142 struct kvm_device_attr *attr)
1143{
1144 struct kvm_s390_vm_cpu_feat data;
1145
1146 bitmap_copy((unsigned long *) data.feat,
1147 kvm_s390_available_cpu_feat,
1148 KVM_S390_VM_CPU_FEAT_NR_BITS);
1149 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1150 return -EFAULT;
1151 return 0;
1152}
1153
David Hildenbrand0a763c72016-05-18 16:03:47 +02001154static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1155 struct kvm_device_attr *attr)
1156{
1157 /*
1158 * Once we can actually configure subfunctions (kernel + hw support),
1159 * we have to check if they were already set by user space, if so copy
1160 * them from kvm->arch.
1161 */
1162 return -ENXIO;
1163}
1164
1165static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1166 struct kvm_device_attr *attr)
1167{
1168 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1169 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1170 return -EFAULT;
1171 return 0;
1172}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001173static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1174{
1175 int ret = -ENXIO;
1176
1177 switch (attr->attr) {
1178 case KVM_S390_VM_CPU_PROCESSOR:
1179 ret = kvm_s390_get_processor(kvm, attr);
1180 break;
1181 case KVM_S390_VM_CPU_MACHINE:
1182 ret = kvm_s390_get_machine(kvm, attr);
1183 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001184 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1185 ret = kvm_s390_get_processor_feat(kvm, attr);
1186 break;
1187 case KVM_S390_VM_CPU_MACHINE_FEAT:
1188 ret = kvm_s390_get_machine_feat(kvm, attr);
1189 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001190 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1191 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1192 break;
1193 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1194 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1195 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001196 }
1197 return ret;
1198}
1199
Dominik Dingelf2061652014-04-09 13:13:00 +02001200static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1201{
1202 int ret;
1203
1204 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001205 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001206 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001207 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001208 case KVM_S390_VM_TOD:
1209 ret = kvm_s390_set_tod(kvm, attr);
1210 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001211 case KVM_S390_VM_CPU_MODEL:
1212 ret = kvm_s390_set_cpu_model(kvm, attr);
1213 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001214 case KVM_S390_VM_CRYPTO:
1215 ret = kvm_s390_vm_set_crypto(kvm, attr);
1216 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001217 case KVM_S390_VM_MIGRATION:
1218 ret = kvm_s390_vm_set_migration(kvm, attr);
1219 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001220 default:
1221 ret = -ENXIO;
1222 break;
1223 }
1224
1225 return ret;
1226}
1227
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

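/*
 * Illustrative userspace sketch (not part of this file): the attr handlers
 * above are driven through the VM file descriptor. Assuming an open VM fd
 * "vm_fd" and <linux/kvm.h> included, the usual pattern is to probe with
 * KVM_HAS_DEVICE_ATTR before setting:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
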
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

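/*
 * Illustrative userspace sketch (not part of this file), assuming an open
 * VM fd and a buffer "buf" of "npages" bytes: each returned byte is the
 * storage key of one guest page, starting at guest frame start_gfn.
 *
 *	struct kvm_s390_skeys skeys = {
 *		.start_gfn     = 0,
 *		.count         = npages,	// 1..KVM_S390_SKEYS_MAX
 *		.skeydata_addr = (__u64)(unsigned long)buf,
 *	};
 *	int rc = ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys);
 *	// rc == KVM_S390_GET_SKEYS_NONE: guest does not use storage keys
 */
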
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

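/*
 * For reference (architecture background, not from the original source):
 * a storage key byte is laid out as ACC (access control, mask 0xf0),
 * F (fetch protection, 0x08), R (reference, 0x04) and C (change, 0x02);
 * bit 0x01 is reserved, which is why set_skeys rejects keys with it set.
 */
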
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

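/*
 * Background note (hedged summary, not from the original source): the
 * inline assembly below issues PQAP with the QCI function code (0x04000000)
 * in register 0 to query the AP crypto configuration into the 128-byte
 * buffer addressed by register 2, then extracts the condition code via
 * IPM/SRL. The EX_TABLE entry lets the instruction fail benignly on
 * machines without AP instructions: cc stays 0 and the buffer stays zeroed,
 * so callers simply see no facilities.
 */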
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

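/*
 * Note (hedged): the CRYCB designation carries the block origin in its
 * upper bits and the format selector in its low bits, which is why the
 * crycb address and the CRYCB_FORMAT* flag can simply be or'ed together
 * above; format 2 is chosen when APXA extends the AP mask sizes.
 */
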
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

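/*
 * Note (an interpretation, hedged): the host's version code is replaced
 * with 0xff, presumably the conventional marker for a CPU running under
 * a hypervisor, while machine type and sequence number are taken over
 * from the host.
 */
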
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

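/*
 * Note (hedged): every VM gets freshly randomized wrapping key masks, so
 * protected-key material generated inside one guest is unusable in any
 * other guest or on the host.
 */
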
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
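	/*
	 * Note (hedged): the SCA origins of different VMs are staggered in
	 * 16-byte steps within the page, presumably so that concurrently
	 * running guests do not all contend on the same cache lines; the
	 * offset wraps before a basic SCA would cross the page boundary.
	 */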
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	kvm->arch.float_int.ais_enabled = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	if (kvm->arch.migration_state) {
		vfree(kvm->arch.migration_state->pgste_bitmap);
		kfree(kvm->arch.migration_state);
	}
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		/* no SCA entry is used: return before touching sda/mcn,
		 * vcpu ids may exceed the basic SCA slot count here */
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

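/*
 * Note (hedged): the basic SCA fits 64 CPU entries into a single page,
 * while the extended SCA raises the limit to 248 entries; the switch
 * above happens lazily, the first time a VCPU id is created that no
 * longer fits into the basic SCA.
 */
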
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

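/*
 * Note (hedged): the reader above deliberately samples the sequence count
 * raw and retries against "seq & ~1". Clearing the low bit means a read
 * that raced with an in-flight update (odd count) can never pass the
 * retry check, so the loop only exits with a consistent cputm snapshot.
 */
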
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

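/*
 * Note (hedged): a z/Architecture prefix area spans two consecutive pages
 * at the VCPU's prefix address and can only live below 2GB; this is why
 * the notifier above bails out for addresses >= 1UL << 31 and matches
 * invalidations against prefix .. prefix + 2 * PAGE_SIZE - 1.
 */
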
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

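/*
 * Illustrative userspace sketch (not part of this file), assuming an open
 * VCPU fd: the ONE_REG interface transfers a single register value through
 * a user pointer, e.g. setting the CPU timer:
 *
 *	__u64 cputm = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */
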
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;
	return 0;
}

2480static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2481{
2482 int rc = 0;
2483
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002484 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002485 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002486 else {
2487 vcpu->run->psw_mask = psw.mask;
2488 vcpu->run->psw_addr = psw.addr;
2489 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002490 return rc;
2491}
2492
2493int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2494 struct kvm_translation *tr)
2495{
2496 return -EINVAL; /* not implemented yet */
2497}
2498
David Hildenbrand27291e22014-01-23 12:26:52 +01002499#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2500 KVM_GUESTDBG_USE_HW_BP | \
2501 KVM_GUESTDBG_ENABLE)
2502
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002503int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2504 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002505{
David Hildenbrand27291e22014-01-23 12:26:52 +01002506 int rc = 0;
2507
2508 vcpu->guest_debug = 0;
2509 kvm_s390_clear_bp_data(vcpu);
2510
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002511 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002512 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002513 if (!sclp.has_gpere)
2514 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002515
2516 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2517 vcpu->guest_debug = dbg->control;
2518 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002519 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002520
2521 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2522 rc = kvm_s390_import_bp_data(vcpu, dbg);
2523 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002524 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002525 vcpu->arch.guestdbg.last_bp = 0;
2526 }
2527
2528 if (rc) {
2529 vcpu->guest_debug = 0;
2530 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002531 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002532 }
2533
2534 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002535}
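/*
 * A hedged userspace sketch for the guest-debug handler above,
 * assuming the uapi layout of struct kvm_guest_debug; "vcpu_fd" and
 * enable_singlestep() are illustrative names.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	/* rejected with -EINVAL above when the machine lacks GPERE */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}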
2536
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002537int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2538 struct kvm_mp_state *mp_state)
2539{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002540 /* CHECK_STOP and LOAD are not supported yet */
2541 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2542 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002543}
2544
2545int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2546 struct kvm_mp_state *mp_state)
2547{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002548 int rc = 0;
2549
2550 /* user space knows about this interface - let it control the state */
2551 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2552
2553 switch (mp_state->mp_state) {
2554 case KVM_MP_STATE_STOPPED:
2555 kvm_s390_vcpu_stop(vcpu);
2556 break;
2557 case KVM_MP_STATE_OPERATING:
2558 kvm_s390_vcpu_start(vcpu);
2559 break;
2560 case KVM_MP_STATE_LOAD:
2561 case KVM_MP_STATE_CHECK_STOP:
2562 /* fall through - CHECK_STOP and LOAD are not supported yet */
2563 default:
2564 rc = -ENXIO;
2565 }
2566
2567 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002568}
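/*
 * Matching userspace sketch for the mp_state handler above, which
 * accepts only STOPPED and OPERATING; restart_vcpu() is an
 * illustrative name under the same assumptions as the sketches above.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int restart_vcpu(int vcpu_fd)
{
	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_OPERATING };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
}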
2569
David Hildenbrand8ad35752014-03-14 11:00:21 +01002570static bool ibs_enabled(struct kvm_vcpu *vcpu)
2571{
2572 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2573}
2574
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002575static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2576{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002577retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002578 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002579 if (!vcpu->requests)
2580 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002581 /*
2582 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002583 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002584 * This ensures that the ipte instruction for this request has
2585 * already finished. We might race against a second unmapper that
2586 * wants to set the blocking bit. Let's just retry the request loop.
2587 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002588 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002589 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002590 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2591 kvm_s390_get_prefix(vcpu),
2592 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002593 if (rc) {
2594 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002595 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002596 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002597 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002598 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002599
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002600 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2601 vcpu->arch.sie_block->ihcpu = 0xffff;
2602 goto retry;
2603 }
2604
David Hildenbrand8ad35752014-03-14 11:00:21 +01002605 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2606 if (!ibs_enabled(vcpu)) {
2607 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002608 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002609 &vcpu->arch.sie_block->cpuflags);
2610 }
2611 goto retry;
2612 }
2613
2614 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2615 if (ibs_enabled(vcpu)) {
2616 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002617 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002618 &vcpu->arch.sie_block->cpuflags);
2619 }
2620 goto retry;
2621 }
2622
David Hildenbrand6502a342016-06-21 14:19:51 +02002623 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2624 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2625 goto retry;
2626 }
2627
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002628 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2629 /*
2630 * Disable CMMA virtualization; we will emulate the ESSA
2631 * instruction manually, in order to provide additional
2632 * functionality needed for live migration.
2633 */
2634 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2635 goto retry;
2636 }
2637
2638 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2639 /*
2640 * Re-enable CMMA virtualization if CMMA is available and
2641 * was used.
2642 */
2643 if ((vcpu->kvm->arch.use_cmma) &&
2644 (vcpu->kvm->mm->context.use_cmma))
2645 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2646 goto retry;
2647 }
2648
David Hildenbrand0759d062014-05-13 16:54:32 +02002649 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002650 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002651
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002652 return 0;
2653}
2654
David Hildenbrand25ed1672015-05-12 09:49:14 +02002655void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2656{
2657 struct kvm_vcpu *vcpu;
2658 int i;
2659
2660 mutex_lock(&kvm->lock);
2661 preempt_disable();
2662 kvm->arch.epoch = tod - get_tod_clock();
2663 kvm_s390_vcpu_block_all(kvm);
2664 kvm_for_each_vcpu(i, vcpu, kvm)
2665 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2666 kvm_s390_vcpu_unblock_all(kvm);
2667 preempt_enable();
2668 mutex_unlock(&kvm->lock);
2669}
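/*
 * The epoch set above is the guest/host TOD delta: SIE presents
 * guest_tod = host_tod + epoch, so storing "tod - get_tod_clock()"
 * makes a subsequent guest clock read observe roughly the requested
 * value.  Worked example with hypothetical numbers:
 *
 *	requested tod = 0x5000, get_tod_clock() = 0x2000
 *	epoch         = 0x5000 - 0x2000 = 0x3000
 *	guest_tod     = 0x2000 + 0x3000 = 0x5000
 */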
2670
Thomas Huthfa576c52014-05-06 17:20:16 +02002671/**
2672 * kvm_arch_fault_in_page - fault-in guest page if necessary
2673 * @vcpu: The corresponding virtual cpu
2674 * @gpa: Guest physical address
2675 * @writable: Whether the page should be writable or not
2676 *
2677 * Make sure that a guest page has been faulted-in on the host.
2678 *
2679 * Return: Zero on success, negative error code otherwise.
2680 */
2681long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002682{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002683 return gmap_fault(vcpu->arch.gmap, gpa,
2684 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002685}
2686
Dominik Dingel3c038e62013-10-07 17:11:48 +02002687static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2688 unsigned long token)
2689{
2690 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002691 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002692
2693 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002694 irq.u.ext.ext_params2 = token;
2695 irq.type = KVM_S390_INT_PFAULT_INIT;
2696 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002697 } else {
2698 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002699 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002700 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2701 }
2702}
2703
2704void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2705 struct kvm_async_pf *work)
2706{
2707 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2708 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2709}
2710
2711void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2712 struct kvm_async_pf *work)
2713{
2714 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2715 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2716}
2717
2718void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2719 struct kvm_async_pf *work)
2720{
2721 /* s390 will always inject the page directly */
2722}
2723
2724bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2725{
2726 /*
2727 * s390 will always inject the page directly,
2728 * but we still want check_async_completion to clean up
2729 */
2730 return true;
2731}
2732
2733static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2734{
2735 hva_t hva;
2736 struct kvm_arch_async_pf arch;
2737 int rc;
2738
2739 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2740 return 0;
2741 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2742 vcpu->arch.pfault_compare)
2743 return 0;
2744 if (psw_extint_disabled(vcpu))
2745 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002746 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002747 return 0;
2748 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2749 return 0;
2750 if (!vcpu->arch.gmap->pfault_enabled)
2751 return 0;
2752
Heiko Carstens81480cc2014-01-01 16:36:07 +01002753 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2754 hva += current->thread.gmap_addr & ~PAGE_MASK;
2755 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002756 return 0;
2757
2758 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2759 return rc;
2760}
2761
Thomas Huth3fb4c402013-09-12 10:33:43 +02002762static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002763{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002764 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002765
Dominik Dingel3c038e62013-10-07 17:11:48 +02002766 /*
2767 * On s390 notifications for arriving pages will be delivered directly
2768 * to the guest, but the housekeeping for completed pfaults is
2769 * handled outside the worker.
2770 */
2771 kvm_check_async_pf_completion(vcpu);
2772
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002773 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2774 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002775
2776 if (need_resched())
2777 schedule();
2778
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002779 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002780 s390_handle_mcck();
2781
Jens Freimann79395032014-04-17 10:10:30 +02002782 if (!kvm_is_ucontrol(vcpu->kvm)) {
2783 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2784 if (rc)
2785 return rc;
2786 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002787
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002788 rc = kvm_s390_handle_requests(vcpu);
2789 if (rc)
2790 return rc;
2791
David Hildenbrand27291e22014-01-23 12:26:52 +01002792 if (guestdbg_enabled(vcpu)) {
2793 kvm_s390_backup_guest_per_regs(vcpu);
2794 kvm_s390_patch_guest_per_regs(vcpu);
2795 }
2796
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002797 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002798 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2799 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2800 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002801
Thomas Huth3fb4c402013-09-12 10:33:43 +02002802 return 0;
2803}
2804
Thomas Huth492d8642015-02-10 16:11:01 +01002805static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2806{
David Hildenbrand56317922016-01-12 17:37:58 +01002807 struct kvm_s390_pgm_info pgm_info = {
2808 .code = PGM_ADDRESSING,
2809 };
2810 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002811 int rc;
2812
2813 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2814 trace_kvm_s390_sie_fault(vcpu);
2815
2816 /*
2817 * We want to inject an addressing exception, which is defined as a
2818 * suppressing or terminating exception. However, since we came here
2819 * by a DAT access exception, the PSW still points to the faulting
2820 * instruction since DAT exceptions are nullifying. So we've got
2821 * to look up the current opcode to get the length of the instruction
2822 * to be able to forward the PSW.
2823 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02002824 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002825 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002826 if (rc < 0) {
2827 return rc;
2828 } else if (rc) {
2829 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2830 * Forward by arbitrary ilc, injection will take care of
2831 * nullification if necessary.
2832 */
2833 pgm_info = vcpu->arch.pgm;
2834 ilen = 4;
2835 }
David Hildenbrand56317922016-01-12 17:37:58 +01002836 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2837 kvm_s390_forward_psw(vcpu, ilen);
2838 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002839}
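/*
 * For reference: insn_length() used above derives the length from the
 * two most-significant bits of the first opcode byte (00 -> 2 bytes,
 * 01/10 -> 4 bytes, 11 -> 6 bytes).  A minimal sketch that should
 * match the helper in asm/dis.h:
 */
static inline unsigned int insn_length_sketch(unsigned char code)
{
	/* 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6 */
	return ((((int) code + 64) >> 7) + 1) << 1;
}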
2840
Thomas Huth3fb4c402013-09-12 10:33:43 +02002841static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2842{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002843 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2844 vcpu->arch.sie_block->icptcode);
2845 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2846
David Hildenbrand27291e22014-01-23 12:26:52 +01002847 if (guestdbg_enabled(vcpu))
2848 kvm_s390_restore_guest_per_regs(vcpu);
2849
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002850 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2851 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002852
2853 if (vcpu->arch.sie_block->icptcode > 0) {
2854 int rc = kvm_handle_sie_intercept(vcpu);
2855
2856 if (rc != -EOPNOTSUPP)
2857 return rc;
2858 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2859 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2860 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2861 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2862 return -EREMOTE;
2863 } else if (exit_reason != -EFAULT) {
2864 vcpu->stat.exit_null++;
2865 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002866 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2867 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2868 vcpu->run->s390_ucontrol.trans_exc_code =
2869 current->thread.gmap_addr;
2870 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002871 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002872 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002873 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002874 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002875 if (kvm_arch_setup_async_pf(vcpu))
2876 return 0;
2877 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002878 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002879 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002880}
2881
2882static int __vcpu_run(struct kvm_vcpu *vcpu)
2883{
2884 int rc, exit_reason;
2885
Thomas Huth800c1062013-09-12 10:33:45 +02002886 /*
2887 * We try to hold kvm->srcu during most of vcpu_run (except when
2888 * running the guest), so that memslots (and other stuff) are protected
2889 */
2890 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2891
Thomas Hutha76ccff2013-09-12 10:33:44 +02002892 do {
2893 rc = vcpu_pre_run(vcpu);
2894 if (rc)
2895 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002896
Thomas Huth800c1062013-09-12 10:33:45 +02002897 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002898 /*
2899 * As PF_VCPU will be used in the fault handler, there must be
2900 * no uaccess between guest_enter and guest_exit.
2901 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002902 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002903 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002904 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002905 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002906 exit_reason = sie64a(vcpu->arch.sie_block,
2907 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002908 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002909 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002910 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02002911 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002912 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002913
Thomas Hutha76ccff2013-09-12 10:33:44 +02002914 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002915 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002916
Thomas Huth800c1062013-09-12 10:33:45 +02002917 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002918 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002919}
2920
David Hildenbrandb028ee32014-07-17 10:47:43 +02002921static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2922{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002923 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002924 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002925
2926 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002927 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02002928 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2929 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2930 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2931 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2932 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2933 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002934 /* some control register changes require a tlb flush */
2935 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002936 }
2937 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002938 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002939 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2940 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2941 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2942 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2943 }
2944 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2945 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2946 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2947 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002948 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2949 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002950 }
Fan Zhang80cd8762016-08-15 04:53:22 +02002951 /*
2952 * If userspace sets the riccb (e.g. after migration) to a valid state,
2953 * we should enable RI here instead of doing the lazy enablement.
2954 */
2955 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002956 test_kvm_facility(vcpu->kvm, 64) &&
2957 riccb->valid &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002958 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002959 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002960 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02002961 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002962 /*
2963 * If userspace sets the gscb (e.g. after migration) to non-zero,
2964 * we should enable GS here instead of doing the lazy enablement.
2965 */
2966 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
2967 test_kvm_facility(vcpu->kvm, 133) &&
2968 gscb->gssm &&
2969 !vcpu->arch.gs_enabled) {
2970 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
2971 vcpu->arch.sie_block->ecb |= ECB_GS;
2972 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
2973 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02002974 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002975 save_access_regs(vcpu->arch.host_acrs);
2976 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002977 /* save host (userspace) fprs/vrs */
2978 save_fpu_regs();
2979 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
2980 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
2981 if (MACHINE_HAS_VX)
2982 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
2983 else
2984 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
2985 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
2986 if (test_fp_ctl(current->thread.fpu.fpc))
2987 /* User space provided an invalid FPC, let's clear it */
2988 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002989 if (MACHINE_HAS_GS) {
2990 preempt_disable();
2991 __ctl_set_bit(2, 4);
2992 if (current->thread.gs_cb) {
2993 vcpu->arch.host_gscb = current->thread.gs_cb;
2994 save_gs_cb(vcpu->arch.host_gscb);
2995 }
2996 if (vcpu->arch.gs_enabled) {
2997 current->thread.gs_cb = (struct gs_cb *)
2998 &vcpu->run->s.regs.gscb;
2999 restore_gs_cb(current->thread.gs_cb);
3000 }
3001 preempt_enable();
3002 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003003
David Hildenbrandb028ee32014-07-17 10:47:43 +02003004 kvm_run->kvm_dirty_regs = 0;
3005}
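/*
 * Userspace half of the sync_regs() protocol above, as a hedged
 * sketch: flag the touched register blocks in kvm_run->kvm_dirty_regs
 * before KVM_RUN so the code above copies them in; "run" is assumed
 * to be the vcpu's mmap()ed kvm_run area and stage_guest_prefix() is
 * an illustrative name.
 */
#include <linux/kvm.h>

static void stage_guest_prefix(struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
	/* the next KVM_RUN ioctl applies this via kvm_s390_set_prefix() */
}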
3006
3007static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3008{
3009 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3010 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3011 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3012 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003013 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003014 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3015 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3016 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3017 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3018 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3019 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3020 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003021 save_access_regs(vcpu->run->s.regs.acrs);
3022 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003023 /* Save guest register state */
3024 save_fpu_regs();
3025 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3026 /* Restore will be done lazily at return */
3027 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3028 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003029 if (MACHINE_HAS_GS) {
3030 __ctl_set_bit(2, 4);
3031 if (vcpu->arch.gs_enabled)
3032 save_gs_cb(current->thread.gs_cb);
3033 preempt_disable();
3034 current->thread.gs_cb = vcpu->arch.host_gscb;
3035 restore_gs_cb(vcpu->arch.host_gscb);
3036 preempt_enable();
3037 if (!vcpu->arch.host_gscb)
3038 __ctl_clear_bit(2, 4);
3039 vcpu->arch.host_gscb = NULL;
3040 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003041
David Hildenbrandb028ee32014-07-17 10:47:43 +02003042}
3043
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003044int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3045{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003046 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003047 sigset_t sigsaved;
3048
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003049 if (kvm_run->immediate_exit)
3050 return -EINTR;
3051
David Hildenbrand27291e22014-01-23 12:26:52 +01003052 if (guestdbg_exit_pending(vcpu)) {
3053 kvm_s390_prepare_debug_exit(vcpu);
3054 return 0;
3055 }
3056
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003057 if (vcpu->sigset_active)
3058 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3059
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003060 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3061 kvm_s390_vcpu_start(vcpu);
3062 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003063 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003064 vcpu->vcpu_id);
3065 return -EINVAL;
3066 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003067
David Hildenbrandb028ee32014-07-17 10:47:43 +02003068 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003069 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003070
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003071 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003072 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003073
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003074 if (signal_pending(current) && !rc) {
3075 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003076 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003077 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003078
David Hildenbrand27291e22014-01-23 12:26:52 +01003079 if (guestdbg_exit_pending(vcpu) && !rc) {
3080 kvm_s390_prepare_debug_exit(vcpu);
3081 rc = 0;
3082 }
3083
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003084 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003085 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003086 rc = 0;
3087 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003088
David Hildenbranddb0758b2016-02-15 09:42:25 +01003089 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003090 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003091
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003092 if (vcpu->sigset_active)
3093 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3094
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003095 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003096 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003097}
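/*
 * A hedged sketch of the classic userspace run loop that ends up in
 * the ioctl handler above; run_vcpu() is an illustrative name, and
 * "run" is assumed to be the vcpu's mmap()ed kvm_run area.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;	/* e.g. EINTR when a signal was pending */

		switch (run->exit_reason) {
		case KVM_EXIT_S390_SIEIC:
			/* hand icptcode/ipa/ipb to the userspace emulator */
			break;
		case KVM_EXIT_INTR:
			break;	/* interrupted, simply re-enter */
		default:
			return 0;	/* let the caller look at exit_reason */
		}
	}
}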
3098
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003099/*
3100 * store status at address
3101 * we have two special cases:
3102 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3103 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3104 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003105int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003106{
Carsten Otte092670c2011-07-24 10:48:22 +02003107 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003108 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003109 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003110 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003111 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003112
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003113 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003114 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3115 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003116 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003117 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003118 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3119 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003120 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003121 gpa = px;
3122 } else
3123 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003124
3125 /* manually convert vector registers if necessary */
3126 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003127 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003128 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3129 fprs, 128);
3130 } else {
3131 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003132 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003133 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003134 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003135 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003136 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003137 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003138 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003139 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003140 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003141 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003142 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003143 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003144 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003145 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003146 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003147 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003148 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003149 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003150 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003151 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003152 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003153 &vcpu->arch.sie_block->gcr, 128);
3154 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003155}
3156
Thomas Huthe8798922013-11-06 15:46:33 +01003157int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3158{
3159 /*
3160 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003161 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003162 * them into the save area
3163 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003164 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003165 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003166 save_access_regs(vcpu->run->s.regs.acrs);
3167
3168 return kvm_s390_store_status_unloaded(vcpu, addr);
3169}
3170
David Hildenbrand8ad35752014-03-14 11:00:21 +01003171static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3172{
3173 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003174 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003175}
3176
3177static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3178{
3179 unsigned int i;
3180 struct kvm_vcpu *vcpu;
3181
3182 kvm_for_each_vcpu(i, vcpu, kvm) {
3183 __disable_ibs_on_vcpu(vcpu);
3184 }
3185}
3186
3187static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3188{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003189 if (!sclp.has_ibs)
3190 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003191 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003192 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003193}
3194
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003195void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3196{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003197 int i, online_vcpus, started_vcpus = 0;
3198
3199 if (!is_vcpu_stopped(vcpu))
3200 return;
3201
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003202 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003203 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003204 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003205 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3206
3207 for (i = 0; i < online_vcpus; i++) {
3208 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3209 started_vcpus++;
3210 }
3211
3212 if (started_vcpus == 0) {
3213 /* we're the only active VCPU -> speed it up */
3214 __enable_ibs_on_vcpu(vcpu);
3215 } else if (started_vcpus == 1) {
3216 /*
3217 * As we are starting a second VCPU, we have to disable
3218 * the IBS facility on all VCPUs to remove potentially
3219 * outstanding ENABLE requests.
3220 */
3221 __disable_ibs_on_all_vcpus(vcpu->kvm);
3222 }
3223
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003224 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003225 /*
3226 * Another VCPU might have used IBS while we were offline.
3227 * Let's play safe and flush the VCPU at startup.
3228 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003229 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003230 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003231 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003232}
3233
3234void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3235{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003236 int i, online_vcpus, started_vcpus = 0;
3237 struct kvm_vcpu *started_vcpu = NULL;
3238
3239 if (is_vcpu_stopped(vcpu))
3240 return;
3241
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003242 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003243 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003244 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003245 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3246
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003247 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003248 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003249
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003250 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003251 __disable_ibs_on_vcpu(vcpu);
3252
3253 for (i = 0; i < online_vcpus; i++) {
3254 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3255 started_vcpus++;
3256 started_vcpu = vcpu->kvm->vcpus[i];
3257 }
3258 }
3259
3260 if (started_vcpus == 1) {
3261 /*
3262 * As we only have one VCPU left, we want to enable the
3263 * IBS facility for that VCPU to speed it up.
3264 */
3265 __enable_ibs_on_vcpu(started_vcpu);
3266 }
3267
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003268 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003269 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003270}
3271
Cornelia Huckd6712df2012-12-20 15:32:11 +01003272static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3273 struct kvm_enable_cap *cap)
3274{
3275 int r;
3276
3277 if (cap->flags)
3278 return -EINVAL;
3279
3280 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003281 case KVM_CAP_S390_CSS_SUPPORT:
3282 if (!vcpu->kvm->arch.css_support) {
3283 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003284 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003285 trace_kvm_s390_enable_css(vcpu->kvm);
3286 }
3287 r = 0;
3288 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003289 default:
3290 r = -EINVAL;
3291 break;
3292 }
3293 return r;
3294}
3295
Thomas Huth41408c282015-02-06 15:01:21 +01003296static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3297 struct kvm_s390_mem_op *mop)
3298{
3299 void __user *uaddr = (void __user *)mop->buf;
3300 void *tmpbuf = NULL;
3301 int r, srcu_idx;
3302 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3303 | KVM_S390_MEMOP_F_CHECK_ONLY;
3304
3305 if (mop->flags & ~supported_flags)
3306 return -EINVAL;
3307
3308 if (mop->size > MEM_OP_MAX_SIZE)
3309 return -E2BIG;
3310
3311 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3312 tmpbuf = vmalloc(mop->size);
3313 if (!tmpbuf)
3314 return -ENOMEM;
3315 }
3316
3317 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3318
3319 switch (mop->op) {
3320 case KVM_S390_MEMOP_LOGICAL_READ:
3321 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003322 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3323 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003324 break;
3325 }
3326 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3327 if (r == 0) {
3328 if (copy_to_user(uaddr, tmpbuf, mop->size))
3329 r = -EFAULT;
3330 }
3331 break;
3332 case KVM_S390_MEMOP_LOGICAL_WRITE:
3333 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003334 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3335 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003336 break;
3337 }
3338 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3339 r = -EFAULT;
3340 break;
3341 }
3342 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3343 break;
3344 default:
3345 r = -EINVAL;
3346 }
3347
3348 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3349
3350 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3351 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3352
3353 vfree(tmpbuf);
3354 return r;
3355}
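/*
 * Userspace view of the memop handler above: a sketch reading 512
 * bytes from a guest logical address, assuming the uapi layout of
 * struct kvm_s390_mem_op; read_guest_mem() is an illustrative name.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_mem(int vcpu_fd, __u64 gaddr, void *buf)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = 512,			/* capped at MEM_OP_MAX_SIZE */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,			/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}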
3356
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003357long kvm_arch_vcpu_ioctl(struct file *filp,
3358 unsigned int ioctl, unsigned long arg)
3359{
3360 struct kvm_vcpu *vcpu = filp->private_data;
3361 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003362 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003363 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003364
Avi Kivity93736622010-05-13 12:35:17 +03003365 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003366 case KVM_S390_IRQ: {
3367 struct kvm_s390_irq s390irq;
3368
3369 r = -EFAULT;
3370 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3371 break;
3372 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3373 break;
3374 }
Avi Kivity93736622010-05-13 12:35:17 +03003375 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003376 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003377 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003378
Avi Kivity93736622010-05-13 12:35:17 +03003379 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003380 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003381 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003382 if (s390int_to_s390irq(&s390int, &s390irq))
3383 return -EINVAL;
3384 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003385 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003386 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003387 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003388 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003389 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003390 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003391 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003392 case KVM_S390_SET_INITIAL_PSW: {
3393 psw_t psw;
3394
Avi Kivitybc923cc2010-05-13 12:21:46 +03003395 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003396 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003397 break;
3398 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3399 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003400 }
3401 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003402 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3403 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003404 case KVM_SET_ONE_REG:
3405 case KVM_GET_ONE_REG: {
3406 struct kvm_one_reg reg;
3407 r = -EFAULT;
3408 if (copy_from_user(&reg, argp, sizeof(reg)))
3409 break;
3410 if (ioctl == KVM_SET_ONE_REG)
3411 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3412 else
3413 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3414 break;
3415 }
Carsten Otte27e03932012-01-04 10:25:21 +01003416#ifdef CONFIG_KVM_S390_UCONTROL
3417 case KVM_S390_UCAS_MAP: {
3418 struct kvm_s390_ucas_mapping ucasmap;
3419
3420 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3421 r = -EFAULT;
3422 break;
3423 }
3424
3425 if (!kvm_is_ucontrol(vcpu->kvm)) {
3426 r = -EINVAL;
3427 break;
3428 }
3429
3430 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3431 ucasmap.vcpu_addr, ucasmap.length);
3432 break;
3433 }
3434 case KVM_S390_UCAS_UNMAP: {
3435 struct kvm_s390_ucas_mapping ucasmap;
3436
3437 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3438 r = -EFAULT;
3439 break;
3440 }
3441
3442 if (!kvm_is_ucontrol(vcpu->kvm)) {
3443 r = -EINVAL;
3444 break;
3445 }
3446
3447 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3448 ucasmap.length);
3449 break;
3450 }
3451#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003452 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003453 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003454 break;
3455 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003456 case KVM_ENABLE_CAP:
3457 {
3458 struct kvm_enable_cap cap;
3459 r = -EFAULT;
3460 if (copy_from_user(&cap, argp, sizeof(cap)))
3461 break;
3462 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3463 break;
3464 }
Thomas Huth41408c282015-02-06 15:01:21 +01003465 case KVM_S390_MEM_OP: {
3466 struct kvm_s390_mem_op mem_op;
3467
3468 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3469 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3470 else
3471 r = -EFAULT;
3472 break;
3473 }
Jens Freimann816c7662014-11-24 17:13:46 +01003474 case KVM_S390_SET_IRQ_STATE: {
3475 struct kvm_s390_irq_state irq_state;
3476
3477 r = -EFAULT;
3478 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3479 break;
3480 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3481 irq_state.len == 0 ||
3482 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3483 r = -EINVAL;
3484 break;
3485 }
3486 r = kvm_s390_set_irq_state(vcpu,
3487 (void __user *) irq_state.buf,
3488 irq_state.len);
3489 break;
3490 }
3491 case KVM_S390_GET_IRQ_STATE: {
3492 struct kvm_s390_irq_state irq_state;
3493
3494 r = -EFAULT;
3495 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3496 break;
3497 if (irq_state.len == 0) {
3498 r = -EINVAL;
3499 break;
3500 }
3501 r = kvm_s390_get_irq_state(vcpu,
3502 (__u8 __user *) irq_state.buf,
3503 irq_state.len);
3504 break;
3505 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003506 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003507 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003508 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003509 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003510}
3511
Carsten Otte5b1c1492012-01-04 10:25:23 +01003512int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3513{
3514#ifdef CONFIG_KVM_S390_UCONTROL
3515 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3516 && (kvm_is_ucontrol(vcpu->kvm))) {
3517 vmf->page = virt_to_page(vcpu->arch.sie_block);
3518 get_page(vmf->page);
3519 return 0;
3520 }
3521#endif
3522 return VM_FAULT_SIGBUS;
3523}
3524
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303525int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3526 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003527{
3528 return 0;
3529}
3530
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003531/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003532int kvm_arch_prepare_memory_region(struct kvm *kvm,
3533 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003534 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003535 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003536{
Nick Wangdd2887e2013-03-25 17:22:57 +01003537	/* A few sanity checks. Memory slots have to start and end at a
3538	   segment boundary (1 MB). The memory in userland may be fragmented
3539	   into various different vmas. It is okay to mmap() and munmap()
3540	   in this slot after this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003541
Carsten Otte598841c2011-07-24 10:48:21 +02003542 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003543 return -EINVAL;
3544
Carsten Otte598841c2011-07-24 10:48:21 +02003545 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003546 return -EINVAL;
3547
Dominik Dingela3a92c32014-12-01 17:24:42 +01003548 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3549 return -EINVAL;
3550
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003551 return 0;
3552}
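/*
 * Worked example for the checks above (0xffffful masks the low 20
 * bits, i.e. enforces 1 MB segment alignment):
 *
 *	userspace_addr = 0x80100000, memory_size = 0x00200000  -> ok
 *	userspace_addr = 0x80123000                            -> -EINVAL
 */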
3553
3554void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003555 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003556 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003557 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003558 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003559{
Carsten Ottef7850c92011-07-24 10:48:23 +02003560 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003561
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003562 /* If the basics of the memslot do not change, we do not want
3563 * to update the gmap. Every update causes several unnecessary
3564 * segment translation exceptions. This is usually handled just
3565 * fine by the normal fault handler + gmap, but it will also
3566 * cause faults on the prefix page of running guest CPUs.
3567 */
3568 if (old->userspace_addr == mem->userspace_addr &&
3569 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3570 old->npages * PAGE_SIZE == mem->memory_size)
3571 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003572
3573 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3574 mem->guest_phys_addr, mem->memory_size);
3575 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003576 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003577 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003578}
3579
Alexander Yarygin60a37702016-04-01 15:38:57 +03003580static inline unsigned long nonhyp_mask(int i)
3581{
3582 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3583
3584 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3585}
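/*
 * Worked example for nonhyp_mask(): sclp.hmfai holds two bits per
 * facility block; for block i they end up in nonhyp_fai (0..3) and
 * select how much of the 48-bit facility mask survives:
 *
 *	nonhyp_fai = 0 -> 0x0000ffffffffffffUL (keep everything)
 *	nonhyp_fai = 1 -> 0x00000000ffffffffUL
 *	nonhyp_fai = 2 -> 0x000000000000ffffUL
 *	nonhyp_fai = 3 -> 0x0000000000000000UL (keep nothing)
 */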
3586
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003587void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3588{
3589 vcpu->valid_wakeup = false;
3590}
3591
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003592static int __init kvm_s390_init(void)
3593{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003594 int i;
3595
David Hildenbrand07197fd2015-01-30 16:01:38 +01003596 if (!sclp.has_sief2) {
3597 pr_info("SIE not available\n");
3598 return -ENODEV;
3599 }
3600
Alexander Yarygin60a37702016-04-01 15:38:57 +03003601 for (i = 0; i < 16; i++)
3602 kvm_s390_fac_list_mask[i] |=
3603 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3604
Michael Mueller9d8d5782015-02-02 15:42:51 +01003605 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003606}
3607
3608static void __exit kvm_s390_exit(void)
3609{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003610 kvm_exit();
3611}
3612
3613module_init(kvm_s390_init);
3614module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003615
3616/*
3617 * Enable autoloading of the kvm module.
3618 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3619 * since x86 takes a different approach.
3620 */
3621#include <linux/miscdevice.h>
3622MODULE_ALIAS_MISCDEV(KVM_MINOR);
3623MODULE_ALIAS("devname:kvm");