// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");


/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

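/*
 * Apply a host TOD clock change of "delta" to the guest view kept in a SIE
 * control block: the epoch is shifted by -delta, and for guests using the
 * multiple-epoch facility (ECD_MEF) the carry/borrow is propagated into the
 * epoch index (epdx).
 */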
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

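/*
 * PERFORM LOCKED OPERATION with the "test bit" (0x100) set in r0 acts as a
 * capability query: no operation is performed, condition code 0 indicates
 * that function code "nr" is installed. Returns 1 if the function is
 * available.
 */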
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

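/*
 * Probe the host once at module init: record which PLO, PTFF and CPACF
 * (crypto) subfunctions are installed and which SCLP/facility based CPU
 * features can be offered to guests. Features needed for nested
 * virtualization (vSIE) are only advertised if "nested" is enabled and the
 * required minimum set of facilities is present.
 */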
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

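/*
 * Transfer the dirty state tracked in the host page tables (gmap) into
 * KVM's dirty bitmap for the given memslot. The walk bails out on a fatal
 * signal and reschedules regularly, so large slots can be processed
 * without hogging the CPU.
 */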
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

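/*
 * Ask every vcpu to enable the operation-exception intercept, so that the
 * 0x0000 instruction is forwarded to user space (KVM_CAP_S390_USER_INSTR0).
 */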
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

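/*
 * Re-initialize the crypto setup of all vcpus from the VM-wide crypto
 * settings. All vcpus are blocked (kicked out of SIE) while the update
 * takes place and unblocked afterwards.
 */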
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the first slot. They are reverse sorted by base_gfn, so
		 * the first slot is also the one at the end of the address
		 * space. We have verified above that at least one slot is
		 * present.
		 */
		ms = slots->memslots;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		/* We have to wait for the essa emulation to finish */
		synchronize_srcu(&kvm->srcu);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

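/*
 * Compute the guest's extended (epoch index + TOD) clock from the current
 * host TOD and the per-VM epoch/epdx offsets; a wrap-around of the 64-bit
 * TOD value carries into the epoch index. Runs with preemption disabled so
 * that the host clock and the epoch are read consistently.
 */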
Collin L. Walling8fa16962016-07-26 15:29:44 -04001043static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
1044 struct kvm_s390_vm_tod_clock *gtod)
1045{
1046 struct kvm_s390_tod_clock_ext htod;
1047
1048 preempt_disable();
1049
1050 get_tod_clock_ext((char *)&htod);
1051
1052 gtod->tod = htod.tod + kvm->arch.epoch;
1053 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1054
1055 if (gtod->tod < htod.tod)
1056 gtod->epoch_idx += 1;
1057
1058 preempt_enable();
1059}
1060
1061static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1062{
1063 struct kvm_s390_vm_tod_clock gtod;
1064
1065 memset(&gtod, 0, sizeof(gtod));
1066
1067 if (test_kvm_facility(kvm, 139))
1068 kvm_s390_get_tod_clock_ext(kvm, &gtod);
1069 else
1070 gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
1071
1072 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1073 return -EFAULT;
1074
1075 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1076 gtod.epoch_idx, gtod.tod);
1077 return 0;
1078}
1079
Jason J. Herne72f25022014-11-25 09:46:02 -05001080static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1081{
1082 u8 gtod_high = 0;
1083
1084 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1085 sizeof(gtod_high)))
1086 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001087 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001088
1089 return 0;
1090}
1091
1092static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1093{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001094 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001095
David Hildenbrand60417fc2015-09-29 16:20:36 +02001096 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001097 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1098 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001099 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001100
1101 return 0;
1102}
1103
1104static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1105{
1106 int ret;
1107
1108 if (attr->flags)
1109 return -EINVAL;
1110
1111 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001112 case KVM_S390_VM_TOD_EXT:
1113 ret = kvm_s390_get_tod_ext(kvm, attr);
1114 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001115 case KVM_S390_VM_TOD_HIGH:
1116 ret = kvm_s390_get_tod_high(kvm, attr);
1117 break;
1118 case KVM_S390_VM_TOD_LOW:
1119 ret = kvm_s390_get_tod_low(kvm, attr);
1120 break;
1121 default:
1122 ret = -ENXIO;
1123 break;
1124 }
1125 return ret;
1126}
1127
Michael Mueller658b6ed2015-02-02 15:49:35 +01001128static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1129{
1130 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001131 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001132 int ret = 0;
1133
1134 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001135 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001136 ret = -EBUSY;
1137 goto out;
1138 }
1139 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1140 if (!proc) {
1141 ret = -ENOMEM;
1142 goto out;
1143 }
1144 if (!copy_from_user(proc, (void __user *)attr->addr,
1145 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001146 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001147 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1148 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001149 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001150 if (proc->ibc > unblocked_ibc)
1151 kvm->arch.model.ibc = unblocked_ibc;
1152 else if (proc->ibc < lowest_ibc)
1153 kvm->arch.model.ibc = lowest_ibc;
1154 else
1155 kvm->arch.model.ibc = proc->ibc;
1156 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001157 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001158 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001159 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1160 kvm->arch.model.ibc,
1161 kvm->arch.model.cpuid);
1162 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1163 kvm->arch.model.fac_list[0],
1164 kvm->arch.model.fac_list[1],
1165 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001166 } else
1167 ret = -EFAULT;
1168 kfree(proc);
1169out:
1170 mutex_unlock(&kvm->lock);
1171 return ret;
1172}
1173
David Hildenbrand15c97052015-03-19 17:36:43 +01001174static int kvm_s390_set_processor_feat(struct kvm *kvm,
1175 struct kvm_device_attr *attr)
1176{
1177 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001178
1179 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1180 return -EFAULT;
1181 if (!bitmap_subset((unsigned long *) data.feat,
1182 kvm_s390_available_cpu_feat,
1183 KVM_S390_VM_CPU_FEAT_NR_BITS))
1184 return -EINVAL;
1185
1186 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001187 if (kvm->created_vcpus) {
1188 mutex_unlock(&kvm->lock);
1189 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001190 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001191 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1192 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001193 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001194 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1195 data.feat[0],
1196 data.feat[1],
1197 data.feat[2]);
1198 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001199}
1200
David Hildenbrand0a763c72016-05-18 16:03:47 +02001201static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1202 struct kvm_device_attr *attr)
1203{
1204 /*
1205 * Once supported by kernel + hw, we have to store the subfunctions
1206 * in kvm->arch and remember that user space configured them.
1207 */
1208 return -ENXIO;
1209}
1210
Michael Mueller658b6ed2015-02-02 15:49:35 +01001211static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1212{
1213 int ret = -ENXIO;
1214
1215 switch (attr->attr) {
1216 case KVM_S390_VM_CPU_PROCESSOR:
1217 ret = kvm_s390_set_processor(kvm, attr);
1218 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001219 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1220 ret = kvm_s390_set_processor_feat(kvm, attr);
1221 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001222 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1223 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1224 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001225 }
1226 return ret;
1227}
1228
1229static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1230{
1231 struct kvm_s390_vm_cpu_processor *proc;
1232 int ret = 0;
1233
1234 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1235 if (!proc) {
1236 ret = -ENOMEM;
1237 goto out;
1238 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001239 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001240 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001241 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1242 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001243 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1244 kvm->arch.model.ibc,
1245 kvm->arch.model.cpuid);
1246 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1247 kvm->arch.model.fac_list[0],
1248 kvm->arch.model.fac_list[1],
1249 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001250 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1251 ret = -EFAULT;
1252 kfree(proc);
1253out:
1254 return ret;
1255}
1256
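/*
 * KVM_S390_VM_CPU_PROCESSOR above reports the model currently configured
 * for the guest (kvm->arch.model); KVM_S390_VM_CPU_MACHINE below reports
 * what the host can offer: the host cpuid, the raw SCLP ibc word, the
 * facility mask and the host facility list read via stfle.
 */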
1257static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1258{
1259 struct kvm_s390_vm_cpu_machine *mach;
1260 int ret = 0;
1261
1262 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1263 if (!mach) {
1264 ret = -ENOMEM;
1265 goto out;
1266 }
1267 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001268 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001269 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001270 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001271 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001272 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001273 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1274 kvm->arch.model.ibc,
1275 kvm->arch.model.cpuid);
1276 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1277 mach->fac_mask[0],
1278 mach->fac_mask[1],
1279 mach->fac_mask[2]);
1280 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1281 mach->fac_list[0],
1282 mach->fac_list[1],
1283 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001284 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1285 ret = -EFAULT;
1286 kfree(mach);
1287out:
1288 return ret;
1289}
1290
David Hildenbrand15c97052015-03-19 17:36:43 +01001291static int kvm_s390_get_processor_feat(struct kvm *kvm,
1292 struct kvm_device_attr *attr)
1293{
1294 struct kvm_s390_vm_cpu_feat data;
1295
1296 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1297 KVM_S390_VM_CPU_FEAT_NR_BITS);
1298 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1299 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001300 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1301 data.feat[0],
1302 data.feat[1],
1303 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001304 return 0;
1305}
1306
1307static int kvm_s390_get_machine_feat(struct kvm *kvm,
1308 struct kvm_device_attr *attr)
1309{
1310 struct kvm_s390_vm_cpu_feat data;
1311
1312 bitmap_copy((unsigned long *) data.feat,
1313 kvm_s390_available_cpu_feat,
1314 KVM_S390_VM_CPU_FEAT_NR_BITS);
1315 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1316 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001317 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1318 data.feat[0],
1319 data.feat[1],
1320 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001321 return 0;
1322}
1323
David Hildenbrand0a763c72016-05-18 16:03:47 +02001324static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1325 struct kvm_device_attr *attr)
1326{
1327 /*
1328 * Once we can actually configure subfunctions (kernel + hw support),
1329 * we have to check if they were already set by user space, if so copy
1330 * them from kvm->arch.
1331 */
1332 return -ENXIO;
1333}
1334
1335static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1336 struct kvm_device_attr *attr)
1337{
1338 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1339 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1340 return -EFAULT;
1341 return 0;
1342}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001343static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1344{
1345 int ret = -ENXIO;
1346
1347 switch (attr->attr) {
1348 case KVM_S390_VM_CPU_PROCESSOR:
1349 ret = kvm_s390_get_processor(kvm, attr);
1350 break;
1351 case KVM_S390_VM_CPU_MACHINE:
1352 ret = kvm_s390_get_machine(kvm, attr);
1353 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001354 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1355 ret = kvm_s390_get_processor_feat(kvm, attr);
1356 break;
1357 case KVM_S390_VM_CPU_MACHINE_FEAT:
1358 ret = kvm_s390_get_machine_feat(kvm, attr);
1359 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001360 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1361 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1362 break;
1363 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1364 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1365 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001366 }
1367 return ret;
1368}
1369
Dominik Dingelf2061652014-04-09 13:13:00 +02001370static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1371{
1372 int ret;
1373
1374 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001375 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001376 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001377 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001378 case KVM_S390_VM_TOD:
1379 ret = kvm_s390_set_tod(kvm, attr);
1380 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001381 case KVM_S390_VM_CPU_MODEL:
1382 ret = kvm_s390_set_cpu_model(kvm, attr);
1383 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001384 case KVM_S390_VM_CRYPTO:
1385 ret = kvm_s390_vm_set_crypto(kvm, attr);
1386 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001387 case KVM_S390_VM_MIGRATION:
1388 ret = kvm_s390_vm_set_migration(kvm, attr);
1389 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001390 default:
1391 ret = -ENXIO;
1392 break;
1393 }
1394
1395 return ret;
1396}
1397
1398static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1399{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001400 int ret;
1401
1402 switch (attr->group) {
1403 case KVM_S390_VM_MEM_CTRL:
1404 ret = kvm_s390_get_mem_control(kvm, attr);
1405 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001406 case KVM_S390_VM_TOD:
1407 ret = kvm_s390_get_tod(kvm, attr);
1408 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001409 case KVM_S390_VM_CPU_MODEL:
1410 ret = kvm_s390_get_cpu_model(kvm, attr);
1411 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001412 case KVM_S390_VM_MIGRATION:
1413 ret = kvm_s390_vm_get_migration(kvm, attr);
1414 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001415 default:
1416 ret = -ENXIO;
1417 break;
1418 }
1419
1420 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001421}
1422
1423static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1424{
1425 int ret;
1426
1427 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001428 case KVM_S390_VM_MEM_CTRL:
1429 switch (attr->attr) {
1430 case KVM_S390_VM_MEM_ENABLE_CMMA:
1431 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001432 ret = sclp.has_cmma ? 0 : -ENXIO;
1433 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001434 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001435 ret = 0;
1436 break;
1437 default:
1438 ret = -ENXIO;
1439 break;
1440 }
1441 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001442 case KVM_S390_VM_TOD:
1443 switch (attr->attr) {
1444 case KVM_S390_VM_TOD_LOW:
1445 case KVM_S390_VM_TOD_HIGH:
1446 ret = 0;
1447 break;
1448 default:
1449 ret = -ENXIO;
1450 break;
1451 }
1452 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001453 case KVM_S390_VM_CPU_MODEL:
1454 switch (attr->attr) {
1455 case KVM_S390_VM_CPU_PROCESSOR:
1456 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001457 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1458 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001459 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001460 ret = 0;
1461 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001462 /* configuring subfunctions is not supported yet */
1463 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001464 default:
1465 ret = -ENXIO;
1466 break;
1467 }
1468 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001469 case KVM_S390_VM_CRYPTO:
1470 switch (attr->attr) {
1471 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1472 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1473 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1474 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1475 ret = 0;
1476 break;
1477 default:
1478 ret = -ENXIO;
1479 break;
1480 }
1481 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001482 case KVM_S390_VM_MIGRATION:
1483 ret = 0;
1484 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001485 default:
1486 ret = -ENXIO;
1487 break;
1488 }
1489
1490 return ret;
1491}
1492
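/*
 * Export guest storage keys to user space: if the guest never made use of
 * storage keys, KVM_S390_GET_SKEYS_NONE is returned instead of data; the
 * request is bounded by KVM_S390_SKEYS_MAX and the keys are read gfn by gfn
 * under mmap_sem and SRCU. Illustrative user-space sketch (not part of this
 * file; the kvm_s390_skeys usage shown is an assumption based on the field
 * names used below):
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys skeys = {
 *		.start_gfn = 0,
 *		.count = sizeof(keys),
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *	int rc = ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys);
 *	if (rc == KVM_S390_GET_SKEYS_NONE)
 *		printf("guest does not use storage keys\n");
 */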
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001493static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1494{
1495 uint8_t *keys;
1496 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001497 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001498
1499 if (args->flags != 0)
1500 return -EINVAL;
1501
1502 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001503 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001504 return KVM_S390_GET_SKEYS_NONE;
1505
1506 /* Enforce sane limit on memory allocation */
1507 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1508 return -EINVAL;
1509
Michal Hocko752ade62017-05-08 15:57:27 -07001510 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001511 if (!keys)
1512 return -ENOMEM;
1513
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001514 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001515 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001516 for (i = 0; i < args->count; i++) {
1517 hva = gfn_to_hva(kvm, args->start_gfn + i);
1518 if (kvm_is_error_hva(hva)) {
1519 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001520 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001521 }
1522
David Hildenbrand154c8c12016-05-09 11:22:34 +02001523 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1524 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001525 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001526 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001527 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001528 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001529
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001530 if (!r) {
1531 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1532 sizeof(uint8_t) * args->count);
1533 if (r)
1534 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001535 }
1536
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001537 kvfree(keys);
1538 return r;
1539}
1540
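/*
 * Import guest storage keys from user space: storage key handling is
 * enabled lazily via s390_enable_skey(), each key is checked for the
 * reserved lowest bit, and the keys are then written gfn by gfn under
 * mmap_sem and SRCU, mirroring kvm_s390_get_skeys() above.
 */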
1541static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1542{
1543 uint8_t *keys;
1544 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001545 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001546
1547 if (args->flags != 0)
1548 return -EINVAL;
1549
1550 /* Enforce sane limit on memory allocation */
1551 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1552 return -EINVAL;
1553
Michal Hocko752ade62017-05-08 15:57:27 -07001554 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001555 if (!keys)
1556 return -ENOMEM;
1557
1558 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1559 sizeof(uint8_t) * args->count);
1560 if (r) {
1561 r = -EFAULT;
1562 goto out;
1563 }
1564
1565 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001566 r = s390_enable_skey();
1567 if (r)
1568 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001569
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001570 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001571 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001572 for (i = 0; i < args->count; i++) {
1573 hva = gfn_to_hva(kvm, args->start_gfn + i);
1574 if (kvm_is_error_hva(hva)) {
1575 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001576 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001577 }
1578
1579 /* Lowest order bit is reserved */
1580 if (keys[i] & 0x01) {
1581 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001582 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001583 }
1584
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001585 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001586 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001587 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001588 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001589 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001590 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001591out:
1592 kvfree(keys);
1593 return r;
1594}
1595
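/*
 * CMMA dirty tracking for migration: KVM_S390_GET_CMMA_BITS reads the
 * per-page CMMA values (either the dirty ones while migration mode is
 * active, or arbitrary pages with KVM_S390_CMMA_PEEK) and
 * KVM_S390_SET_CMMA_BITS restores them on the destination. Illustrative
 * user-space read loop (not part of this file; an assumption based on the
 * field names used below, with error handling omitted):
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.flags = 0,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	do {
 *		log.count = sizeof(buf);
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		// buf now holds log.count values for gfns from log.start_gfn on
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */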
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001596/*
1597 * Each block of CMMA values is preceded by its base address and length, so
1598 * it is cheaper to transmit a short run of clean values than to start a new
1599 * block, as long as that run is shorter than two longs.
1600 */
1601#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1602/* for consistency */
1603#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1604
1605/*
1606 * This function searches for the next page with dirty CMMA attributes, and
1607 * saves the attributes in the buffer up to either the end of the buffer or
1608 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1609 * no trailing clean bytes are saved.
1610 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1611 * output buffer will indicate 0 as length.
1612 */
1613static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1614 struct kvm_s390_cmma_log *args)
1615{
1616 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1617 unsigned long bufsize, hva, pgstev, i, next, cur;
1618 int srcu_idx, peek, r = 0, rr;
1619 u8 *res;
1620
1621 cur = args->start_gfn;
1622 i = next = pgstev = 0;
1623
1624 if (unlikely(!kvm->arch.use_cmma))
1625 return -ENXIO;
1626 /* Invalid/unsupported flags were specified */
1627 if (args->flags & ~KVM_S390_CMMA_PEEK)
1628 return -EINVAL;
1629	/* Migration data was requested (no peek), but migration mode is not active */
1630 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1631 if (!peek && !s)
1632 return -EINVAL;
1633 /* CMMA is disabled or was not used, or the buffer has length zero */
1634 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001635 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001636 memset(args, 0, sizeof(*args));
1637 return 0;
1638 }
1639
1640 if (!peek) {
1641 /* We are not peeking, and there are no dirty pages */
1642 if (!atomic64_read(&s->dirty_pages)) {
1643 memset(args, 0, sizeof(*args));
1644 return 0;
1645 }
1646 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1647 args->start_gfn);
1648 if (cur >= s->bitmap_size) /* nothing found, loop back */
1649 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1650 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1651 memset(args, 0, sizeof(*args));
1652 return 0;
1653 }
1654 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1655 }
1656
1657 res = vmalloc(bufsize);
1658 if (!res)
1659 return -ENOMEM;
1660
1661 args->start_gfn = cur;
1662
1663 down_read(&kvm->mm->mmap_sem);
1664 srcu_idx = srcu_read_lock(&kvm->srcu);
1665 while (i < bufsize) {
1666 hva = gfn_to_hva(kvm, cur);
1667 if (kvm_is_error_hva(hva)) {
1668 r = -EFAULT;
1669 break;
1670 }
1671 /* decrement only if we actually flipped the bit to 0 */
1672 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1673 atomic64_dec(&s->dirty_pages);
1674 r = get_pgste(kvm->mm, hva, &pgstev);
1675 if (r < 0)
1676 pgstev = 0;
1677 /* save the value */
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001678 res[i++] = (pgstev >> 24) & 0x43;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001679 /*
1680 * if the next bit is too far away, stop.
1681 * if we reached the previous "next", find the next one
1682 */
1683 if (!peek) {
1684 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1685 break;
1686 if (cur == next)
1687 next = find_next_bit(s->pgste_bitmap,
1688 s->bitmap_size, cur + 1);
1689 /* reached the end of the bitmap or of the buffer, stop */
1690 if ((next >= s->bitmap_size) ||
1691 (next >= args->start_gfn + bufsize))
1692 break;
1693 }
1694 cur++;
1695 }
1696 srcu_read_unlock(&kvm->srcu, srcu_idx);
1697 up_read(&kvm->mm->mmap_sem);
1698 args->count = i;
1699 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1700
1701 rr = copy_to_user((void __user *)args->values, res, args->count);
1702 if (rr)
1703 r = -EFAULT;
1704
1705 vfree(res);
1706 return r;
1707}
1708
1709/*
1710 * This function sets the CMMA attributes for the given pages. If the input
1711 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001712 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001713 */
1714static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1715 const struct kvm_s390_cmma_log *args)
1716{
1717 unsigned long hva, mask, pgstev, i;
1718 uint8_t *bits;
1719 int srcu_idx, r = 0;
1720
1721 mask = args->mask;
1722
1723 if (!kvm->arch.use_cmma)
1724 return -ENXIO;
1725 /* invalid/unsupported flags */
1726 if (args->flags != 0)
1727 return -EINVAL;
1728 /* Enforce sane limit on memory allocation */
1729 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1730 return -EINVAL;
1731 /* Nothing to do */
1732 if (args->count == 0)
1733 return 0;
1734
1735 bits = vmalloc(sizeof(*bits) * args->count);
1736 if (!bits)
1737 return -ENOMEM;
1738
1739 r = copy_from_user(bits, (void __user *)args->values, args->count);
1740 if (r) {
1741 r = -EFAULT;
1742 goto out;
1743 }
1744
1745 down_read(&kvm->mm->mmap_sem);
1746 srcu_idx = srcu_read_lock(&kvm->srcu);
1747 for (i = 0; i < args->count; i++) {
1748 hva = gfn_to_hva(kvm, args->start_gfn + i);
1749 if (kvm_is_error_hva(hva)) {
1750 r = -EFAULT;
1751 break;
1752 }
1753
1754 pgstev = bits[i];
1755 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001756 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001757 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1758 }
1759 srcu_read_unlock(&kvm->srcu, srcu_idx);
1760 up_read(&kvm->mm->mmap_sem);
1761
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001762 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001763 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001764 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001765 up_write(&kvm->mm->mmap_sem);
1766 }
1767out:
1768 vfree(bits);
1769 return r;
1770}
1771
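/*
 * Top-level dispatcher for the VM-scope ioctls on s390: floating interrupt
 * injection, capability enablement, the dummy routing setup for the
 * in-kernel irqchip, the device-attribute interface handled above, storage
 * key import/export and the CMMA migration log. Only the CMMA calls take
 * kvm->slots_lock here; the attribute handlers serialize on kvm->lock
 * internally where needed.
 */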
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001772long kvm_arch_vm_ioctl(struct file *filp,
1773 unsigned int ioctl, unsigned long arg)
1774{
1775 struct kvm *kvm = filp->private_data;
1776 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001777 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001778 int r;
1779
1780 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001781 case KVM_S390_INTERRUPT: {
1782 struct kvm_s390_interrupt s390int;
1783
1784 r = -EFAULT;
1785 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1786 break;
1787 r = kvm_s390_inject_vm(kvm, &s390int);
1788 break;
1789 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001790 case KVM_ENABLE_CAP: {
1791 struct kvm_enable_cap cap;
1792 r = -EFAULT;
1793 if (copy_from_user(&cap, argp, sizeof(cap)))
1794 break;
1795 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1796 break;
1797 }
Cornelia Huck84223592013-07-15 13:36:01 +02001798 case KVM_CREATE_IRQCHIP: {
1799 struct kvm_irq_routing_entry routing;
1800
1801 r = -EINVAL;
1802 if (kvm->arch.use_irqchip) {
1803 /* Set up dummy routing. */
1804 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001805 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001806 }
1807 break;
1808 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001809 case KVM_SET_DEVICE_ATTR: {
1810 r = -EFAULT;
1811 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1812 break;
1813 r = kvm_s390_vm_set_attr(kvm, &attr);
1814 break;
1815 }
1816 case KVM_GET_DEVICE_ATTR: {
1817 r = -EFAULT;
1818 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1819 break;
1820 r = kvm_s390_vm_get_attr(kvm, &attr);
1821 break;
1822 }
1823 case KVM_HAS_DEVICE_ATTR: {
1824 r = -EFAULT;
1825 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1826 break;
1827 r = kvm_s390_vm_has_attr(kvm, &attr);
1828 break;
1829 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001830 case KVM_S390_GET_SKEYS: {
1831 struct kvm_s390_skeys args;
1832
1833 r = -EFAULT;
1834 if (copy_from_user(&args, argp,
1835 sizeof(struct kvm_s390_skeys)))
1836 break;
1837 r = kvm_s390_get_skeys(kvm, &args);
1838 break;
1839 }
1840 case KVM_S390_SET_SKEYS: {
1841 struct kvm_s390_skeys args;
1842
1843 r = -EFAULT;
1844 if (copy_from_user(&args, argp,
1845 sizeof(struct kvm_s390_skeys)))
1846 break;
1847 r = kvm_s390_set_skeys(kvm, &args);
1848 break;
1849 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001850 case KVM_S390_GET_CMMA_BITS: {
1851 struct kvm_s390_cmma_log args;
1852
1853 r = -EFAULT;
1854 if (copy_from_user(&args, argp, sizeof(args)))
1855 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001856 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001857 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001858 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001859 if (!r) {
1860 r = copy_to_user(argp, &args, sizeof(args));
1861 if (r)
1862 r = -EFAULT;
1863 }
1864 break;
1865 }
1866 case KVM_S390_SET_CMMA_BITS: {
1867 struct kvm_s390_cmma_log args;
1868
1869 r = -EFAULT;
1870 if (copy_from_user(&args, argp, sizeof(args)))
1871 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001872 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001873 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001874 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001875 break;
1876 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001877 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001878 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001879 }
1880
1881 return r;
1882}
1883
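/*
 * Crypto setup helpers: kvm_s390_query_ap_config() issues PQAP with the QCI
 * function code (the ".long 0xb2af0000" opcode below) to read the AP
 * configuration, kvm_s390_apxa_installed() uses that data to detect APXA,
 * and kvm_s390_set_crycb_format() then selects CRYCB format 2 or format 1
 * for the guest crypto control block.
 */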
Tony Krowiak45c9b472015-01-13 11:33:26 -05001884static int kvm_s390_query_ap_config(u8 *config)
1885{
1886 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001887 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001888
Christian Borntraeger86044c82015-02-26 13:53:47 +01001889 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05001890 asm volatile(
1891 "lgr 0,%1\n"
1892 "lgr 2,%2\n"
1893 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001894 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001895 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001896 "1:\n"
1897 EX_TABLE(0b, 1b)
1898 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001899 : "r" (fcn_code), "r" (config)
1900 : "cc", "0", "2", "memory"
1901 );
1902
1903 return cc;
1904}
1905
1906static int kvm_s390_apxa_installed(void)
1907{
1908 u8 config[128];
1909 int cc;
1910
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001911 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001912 cc = kvm_s390_query_ap_config(config);
1913
1914 if (cc)
1915			pr_err("PQAP(QCI) failed with cc=%d\n", cc);
1916 else
1917 return config[0] & 0x40;
1918 }
1919
1920 return 0;
1921}
1922
1923static void kvm_s390_set_crycb_format(struct kvm *kvm)
1924{
1925 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1926
1927 if (kvm_s390_apxa_installed())
1928 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1929 else
1930 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1931}
1932
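/*
 * The default guest cpuid is the host cpuid with the version field
 * overridden to 0xff.
 */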
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001933static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001934{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001935 struct cpuid cpuid;
1936
1937 get_cpu_id(&cpuid);
1938 cpuid.version = 0xff;
1939 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001940}
1941
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001942static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04001943{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001944 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001945 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001946
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001947 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001948 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001949
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001950 /* Enable AES/DEA protected key functions by default */
1951 kvm->arch.crypto.aes_kw = 1;
1952 kvm->arch.crypto.dea_kw = 1;
1953 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1954 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1955 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1956 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04001957}
1958
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001959static void sca_dispose(struct kvm *kvm)
1960{
1961 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001962 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001963 else
1964 free_page((unsigned long)(kvm->arch.sca));
1965 kvm->arch.sca = NULL;
1966}
1967
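/*
 * VM creation: allocates the basic SCA and the per-VM s390 debug feature
 * area, computes the guest facility mask and list in sie_page2, sets up
 * crypto, floating interrupt and gmap state, and honours the
 * KVM_VM_S390_UCONTROL type for user-controlled VMs. Illustrative
 * user-space sketch of reaching this path (not part of this file; standard
 * KVM API usage, stated as an assumption):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	// type 0: regular VM
 *	// with CAP_SYS_ADMIN, a user-controlled VM instead:
 *	// int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 */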
Carsten Ottee08b9632012-01-04 10:25:20 +01001968int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001969{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001970 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001971 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001972 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001973 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001974
Carsten Ottee08b9632012-01-04 10:25:20 +01001975 rc = -EINVAL;
1976#ifdef CONFIG_KVM_S390_UCONTROL
1977 if (type & ~KVM_VM_S390_UCONTROL)
1978 goto out_err;
1979 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1980 goto out_err;
1981#else
1982 if (type)
1983 goto out_err;
1984#endif
1985
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001986 rc = s390_enable_sie();
1987 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001988 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001989
Carsten Otteb2904112011-10-18 12:27:13 +02001990 rc = -ENOMEM;
1991
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02001992 kvm->arch.use_esca = 0; /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001993 if (!sclp.has_64bscao)
1994 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001995 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001996 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001997 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001998 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001999 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002000 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002001 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002002 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002003 kvm->arch.sca = (struct bsca_block *)
2004 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002005 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002006
2007 sprintf(debug_name, "kvm-%u", current->pid);
2008
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002009 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002010 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002011 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002012
Michael Mueller19114be2017-05-30 14:26:02 +02002013 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002014 kvm->arch.sie_page2 =
2015 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2016 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002017 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002018
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002019 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002020
2021 for (i = 0; i < kvm_s390_fac_size(); i++) {
2022 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2023 (kvm_s390_fac_base[i] |
2024 kvm_s390_fac_ext[i]);
2025 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2026 kvm_s390_fac_base[i];
2027 }
Michael Mueller981467c2015-02-24 13:51:04 +01002028
David Hildenbrand19352222017-08-29 16:31:08 +02002029 /* we are always in czam mode - even on pre z14 machines */
2030 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2031 set_kvm_facility(kvm->arch.model.fac_list, 138);
2032 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002033 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2034 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002035 if (MACHINE_HAS_TLB_GUEST) {
2036 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2037 set_kvm_facility(kvm->arch.model.fac_list, 147);
2038 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002039
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002040 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002041 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002042
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002043 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002044
Fei Li51978392017-02-17 17:06:26 +08002045 mutex_init(&kvm->arch.float_int.ais_lock);
2046 kvm->arch.float_int.simm = 0;
2047 kvm->arch.float_int.nimm = 0;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002048 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002049 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2050 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002051 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002052 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002053
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002054 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002055 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002056
Carsten Ottee08b9632012-01-04 10:25:20 +01002057 if (type & KVM_VM_S390_UCONTROL) {
2058 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002059 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002060 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002061 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002062 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002063 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002064 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002065 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002066 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002067 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002068 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002069 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002070 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002071 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002072
2073 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02002074 kvm->arch.use_irqchip = 0;
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002075 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002076 kvm->arch.use_skf = sclp.has_skey;
Jason J. Herne72f25022014-11-25 09:46:02 -05002077 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002078
David Hildenbrand8ad35752014-03-14 11:00:21 +01002079 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002080 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002081 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002082 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002083
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002084 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002085out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002086 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002087 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002088 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002089 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002090 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002091}
2092
Luiz Capitulino235539b2016-09-07 14:47:23 -04002093bool kvm_arch_has_vcpu_debugfs(void)
2094{
2095 return false;
2096}
2097
2098int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2099{
2100 return 0;
2101}
2102
Christian Borntraegerd329c032008-11-26 14:50:27 +01002103void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2104{
2105 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002106 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002107 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002108 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002109 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002110 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002111
2112 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002113 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002114
Dominik Dingele6db1d62015-05-07 15:41:57 +02002115 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002116 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002117 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002118
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002119 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002120 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002121}
2122
2123static void kvm_free_vcpus(struct kvm *kvm)
2124{
2125 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002126 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002127
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002128 kvm_for_each_vcpu(i, vcpu, kvm)
2129 kvm_arch_vcpu_destroy(vcpu);
2130
2131 mutex_lock(&kvm->lock);
2132 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2133 kvm->vcpus[i] = NULL;
2134
2135 atomic_set(&kvm->online_vcpus, 0);
2136 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002137}
2138
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002139void kvm_arch_destroy_vm(struct kvm *kvm)
2140{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002141 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002142 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002143 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002144 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002145 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002146 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002147 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002148 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002149 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002150 kvm_s390_vsie_destroy(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002151 if (kvm->arch.migration_state) {
2152 vfree(kvm->arch.migration_state->pgste_bitmap);
2153 kfree(kvm->arch.migration_state);
2154 }
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002155 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002156}
2157
2158/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002159static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2160{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002161 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002162 if (!vcpu->arch.gmap)
2163 return -ENOMEM;
2164 vcpu->arch.gmap->private = vcpu->kvm;
2165
2166 return 0;
2167}
2168
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002169static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2170{
David Hildenbranda6940672016-08-08 22:39:32 +02002171 if (!kvm_s390_use_sca_entries())
2172 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002173 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002174 if (vcpu->kvm->arch.use_esca) {
2175 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002176
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002177 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002178 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002179 } else {
2180 struct bsca_block *sca = vcpu->kvm->arch.sca;
2181
2182 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002183 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002184 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002185 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002186}
2187
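/*
 * Hook a VCPU's SIE block into the system control area. Without SCA entries
 * only the basic SCA origin is written (still needed for ipte control);
 * otherwise the SDA entry and the MCN bit are set in either the extended or
 * the basic SCA, mirroring sca_del_vcpu() above.
 */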
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002188static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002189{
David Hildenbranda6940672016-08-08 22:39:32 +02002190 if (!kvm_s390_use_sca_entries()) {
2191 struct bsca_block *sca = vcpu->kvm->arch.sca;
2192
2193 /* we still need the basic sca for the ipte control */
2194 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2195 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002196 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002197 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002198 read_lock(&vcpu->kvm->arch.sca_lock);
2199 if (vcpu->kvm->arch.use_esca) {
2200 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002201
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002202 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002203 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2204 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002205 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002206 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002207 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002208 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002209
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002210 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002211 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2212 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002213 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002214 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002215 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002216}
2217
2218/* Basic SCA to Extended SCA data copy routines */
2219static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2220{
2221 d->sda = s->sda;
2222 d->sigp_ctrl.c = s->sigp_ctrl.c;
2223 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2224}
2225
2226static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2227{
2228 int i;
2229
2230 d->ipte_control = s->ipte_control;
2231 d->mcn[0] = s->mcn;
2232 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2233 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2234}
2235
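/*
 * Replace the basic SCA by an extended one so more than
 * KVM_S390_BSCA_CPU_SLOTS VCPUs can be addressed: all VCPUs are blocked, the
 * entries are copied under the sca_lock write lock, every SIE block is
 * repointed to the new origin with ECB2_ESCA set, and only then is the old
 * basic SCA freed.
 */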
2236static int sca_switch_to_extended(struct kvm *kvm)
2237{
2238 struct bsca_block *old_sca = kvm->arch.sca;
2239 struct esca_block *new_sca;
2240 struct kvm_vcpu *vcpu;
2241 unsigned int vcpu_idx;
2242 u32 scaol, scaoh;
2243
2244 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2245 if (!new_sca)
2246 return -ENOMEM;
2247
2248 scaoh = (u32)((u64)(new_sca) >> 32);
2249 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2250
2251 kvm_s390_vcpu_block_all(kvm);
2252 write_lock(&kvm->arch.sca_lock);
2253
2254 sca_copy_b_to_e(new_sca, old_sca);
2255
2256 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2257 vcpu->arch.sie_block->scaoh = scaoh;
2258 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002259 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002260 }
2261 kvm->arch.sca = new_sca;
2262 kvm->arch.use_esca = 1;
2263
2264 write_unlock(&kvm->arch.sca_lock);
2265 kvm_s390_vcpu_unblock_all(kvm);
2266
2267 free_page((unsigned long)old_sca);
2268
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002269 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2270 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002271 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002272}
2273
2274static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2275{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002276 int rc;
2277
David Hildenbranda6940672016-08-08 22:39:32 +02002278 if (!kvm_s390_use_sca_entries()) {
2279 if (id < KVM_MAX_VCPUS)
2280 return true;
2281 return false;
2282 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002283 if (id < KVM_S390_BSCA_CPU_SLOTS)
2284 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002285 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002286 return false;
2287
2288 mutex_lock(&kvm->lock);
2289 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2290 mutex_unlock(&kvm->lock);
2291
2292 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002293}
2294
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002295int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2296{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002297 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2298 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002299 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2300 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002301 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002302 KVM_SYNC_CRS |
2303 KVM_SYNC_ARCH0 |
2304 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002305 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002306 if (test_kvm_facility(vcpu->kvm, 64))
2307 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002308 if (test_kvm_facility(vcpu->kvm, 82))
2309 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002310 if (test_kvm_facility(vcpu->kvm, 133))
2311 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002312 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2313 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2314 */
2315 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002316 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002317 else
2318 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002319
2320 if (kvm_is_ucontrol(vcpu->kvm))
2321 return __kvm_ucontrol_vcpu_init(vcpu);
2322
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002323 return 0;
2324}
2325
David Hildenbranddb0758b2016-02-15 09:42:25 +01002326/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2327static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2328{
2329 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002330 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002331 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002332 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002333}
2334
2335/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2336static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2337{
2338 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002339 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002340 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2341 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002342 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002343}
2344
2345/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2346static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2347{
2348 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2349 vcpu->arch.cputm_enabled = true;
2350 __start_cpu_timer_accounting(vcpu);
2351}
2352
2353/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2354static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2355{
2356 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2357 __stop_cpu_timer_accounting(vcpu);
2358 vcpu->arch.cputm_enabled = false;
2359}
2360
2361static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2362{
2363 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2364 __enable_cpu_timer_accounting(vcpu);
2365 preempt_enable();
2366}
2367
2368static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2369{
2370 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2371 __disable_cpu_timer_accounting(vcpu);
2372 preempt_enable();
2373}
2374
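/*
 * The guest CPU timer is accounted for with a raw seqcount: the owning VCPU
 * thread updates cputm_start and sie_block->cputm with preemption disabled,
 * while kvm_s390_get_cpu_timer() may be called from other threads and
 * retries its read until it observes a consistent pair of values.
 */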
David Hildenbrand4287f242016-02-15 09:40:12 +01002375/* set the cpu timer - may only be called from the VCPU thread itself */
2376void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2377{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002378 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002379 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002380 if (vcpu->arch.cputm_enabled)
2381 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002382 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002383 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002384 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002385}
2386
David Hildenbranddb0758b2016-02-15 09:42:25 +01002387/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002388__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2389{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002390 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002391 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002392
2393 if (unlikely(!vcpu->arch.cputm_enabled))
2394 return vcpu->arch.sie_block->cputm;
2395
David Hildenbrand9c23a132016-02-17 21:53:33 +01002396 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2397 do {
2398 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2399 /*
2400 * If the writer would ever execute a read in the critical
2401 * section, e.g. in irq context, we have a deadlock.
2402 */
2403 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2404 value = vcpu->arch.sie_block->cputm;
2405 /* if cputm_start is 0, accounting is being started/stopped */
2406 if (likely(vcpu->arch.cputm_start))
2407 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2408 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2409 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002410 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002411}
2412
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002413void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2414{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002415
David Hildenbrand37d9df92015-03-11 16:47:33 +01002416 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002417 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002418 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002419 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002420 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002421}
2422
2423void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2424{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002425 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002426 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002427 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002428 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002429 vcpu->arch.enabled_gmap = gmap_get_enabled();
2430 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002431
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002432}
2433
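/*
 * Architected initial CPU reset: clear the guest PSW, prefix, CPU timer,
 * clock comparator, TOD programmable register and control registers (with
 * the architected default bits in CR0 and CR14), reset the lazily loaded
 * FPC and the BPBC control, drop pending local interrupts and, unless user
 * space controls the CPU state, put the VCPU into stopped state.
 */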
2434static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2435{
2436	/* this equals the initial cpu reset in the PoP, but we don't switch to ESA */
2437 vcpu->arch.sie_block->gpsw.mask = 0UL;
2438 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002439 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002440 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002441 vcpu->arch.sie_block->ckc = 0UL;
2442 vcpu->arch.sie_block->todpr = 0;
2443 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002444 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2445 CR0_INTERRUPT_KEY_SUBMASK |
2446 CR0_MEASUREMENT_ALERT_SUBMASK;
2447 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2448 CR14_UNUSED_33 |
2449 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002450 /* make sure the new fpc will be lazily loaded */
2451 save_fpu_regs();
2452 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002453 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002454 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002455 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002456 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2457 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002458 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2459 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002460 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002461}
2462
Dominik Dingel31928aa2014-12-04 15:47:07 +01002463void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002464{
Jason J. Herne72f25022014-11-25 09:46:02 -05002465 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002466 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002467 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002468 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002469 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002470 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002471 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002472 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002473 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002474 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002475 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2476 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002477 /* make vcpu_load load the right gmap on the first trigger */
2478 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002479}
2480
Tony Krowiak5102ee82014-06-27 14:46:01 -04002481static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2482{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002483 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002484 return;
2485
Tony Krowiaka374e892014-09-03 10:13:53 +02002486 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2487
2488 if (vcpu->kvm->arch.crypto.aes_kw)
2489 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2490 if (vcpu->kvm->arch.crypto.dea_kw)
2491 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2492
Tony Krowiak5102ee82014-06-27 14:46:01 -04002493 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2494}
2495
Dominik Dingelb31605c2014-03-25 13:47:11 +01002496void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2497{
2498 free_page(vcpu->arch.sie_block->cbrlo);
2499 vcpu->arch.sie_block->cbrlo = 0;
2500}
2501
2502int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2503{
2504 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2505 if (!vcpu->arch.sie_block->cbrlo)
2506 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002507 return 0;
2508}
2509
Michael Mueller91520f12015-02-27 14:32:11 +01002510static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2511{
2512 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2513
Michael Mueller91520f12015-02-27 14:32:11 +01002514 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002515 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002516 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002517}
2518
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002519int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2520{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002521 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002522
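	/*
	 * Derive the SIE control block settings below from the facilities of
	 * the guest CPU model and from the features the host (sclp) reports.
	 */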
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002523 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2524 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002525 CPUSTAT_STOPPED);
2526
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002527 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002528 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002529 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002530 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002531
Michael Mueller91520f12015-02-27 14:32:11 +01002532 kvm_s390_vcpu_setup_model(vcpu);
2533
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002534 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2535 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002536 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002537 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002538 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002539 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002540 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002541
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002542 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002543 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002544 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002545 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2546 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002547 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002548 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002549 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002550 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002551 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002552 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002553 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002554 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002555 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002556 vcpu->arch.sie_block->eca |= ECA_VX;
2557 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002558 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002559 if (test_kvm_facility(vcpu->kvm, 139))
2560 vcpu->arch.sie_block->ecd |= ECD_MEF;
2561
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002562 if (vcpu->arch.sie_block->gd) {
2563 vcpu->arch.sie_block->eca |= ECA_AIV;
2564 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2565 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2566 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002567 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2568 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002569 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002570
2571 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002572 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05002573 else
2574 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002575
Dominik Dingele6db1d62015-05-07 15:41:57 +02002576 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002577 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2578 if (rc)
2579 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002580 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002581 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002582 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002583
Tony Krowiak5102ee82014-06-27 14:46:01 -04002584 kvm_s390_vcpu_crypto_setup(vcpu);
2585
Dominik Dingelb31605c2014-03-25 13:47:11 +01002586 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002587}
2588
2589struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2590 unsigned int id)
2591{
Carsten Otte4d475552011-10-18 12:27:12 +02002592 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002593 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002594 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002595
David Hildenbrand42158252015-10-12 12:57:22 +02002596 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002597 goto out;
2598
2599 rc = -ENOMEM;
2600
Michael Muellerb110fea2013-06-12 13:54:54 +02002601 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002602 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002603 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002604
QingFeng Haoda72ca42017-06-07 11:41:19 +02002605 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002606 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2607 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002608 goto out_free_cpu;
2609
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002610 vcpu->arch.sie_block = &sie_page->sie_block;
2611 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2612
David Hildenbrandefed1102015-04-16 12:32:41 +02002613 /* the real guest size will always be smaller than msl */
2614 vcpu->arch.sie_block->mso = 0;
2615 vcpu->arch.sie_block->msl = sclp.hamax;
2616
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002617 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002618 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002619 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
Michael Mueller4b9f9522017-06-23 13:51:25 +02002620 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2621 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002622 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002623
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002624 rc = kvm_vcpu_init(vcpu, kvm, id);
2625 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002626 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002627 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002628 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002629 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002630
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002631 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002632out_free_sie_block:
2633 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002634out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002635 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002636out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002637 return ERR_PTR(rc);
2638}
2639
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002640int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2641{
David Hildenbrand9a022062014-08-05 17:40:47 +02002642 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002643}
2644
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002645bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2646{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002647 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002648}
2649
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002650void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002651{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002652 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002653 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002654}
2655
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002656void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002657{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002658 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002659}
2660
Christian Borntraeger8e236542015-04-09 13:49:04 +02002661static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2662{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002663 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002664 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002665}
2666
2667static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2668{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002669 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002670}
2671
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002672/*
2673 * Kick a guest cpu out of SIE and wait until SIE is not running.
 2674 * If the CPU is not running (e.g. waiting while idle), the function will
2675 * return immediately. */
2676void exit_sie(struct kvm_vcpu *vcpu)
2677{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002678 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002679 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2680 cpu_relax();
2681}
2682
Christian Borntraeger8e236542015-04-09 13:49:04 +02002683/* Kick a guest cpu out of SIE to process a request synchronously */
2684void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002685{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002686 kvm_make_request(req, vcpu);
2687 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002688}
2689
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002690static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2691 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002692{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002693 struct kvm *kvm = gmap->private;
2694 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002695 unsigned long prefix;
2696 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002697
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002698 if (gmap_is_shadow(gmap))
2699 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002700 if (start >= 1UL << 31)
2701 /* We are only interested in prefix pages */
2702 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002703 kvm_for_each_vcpu(i, vcpu, kvm) {
2704 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002705 prefix = kvm_s390_get_prefix(vcpu);
2706 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2707 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2708 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002709 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002710 }
2711 }
2712}
2713
Christoffer Dallb6d33832012-03-08 16:44:24 -05002714int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2715{
2716 /* kvm common code refers to this, but never calls it */
2717 BUG();
2718 return 0;
2719}
2720
Carsten Otte14eebd92012-05-15 14:15:26 +02002721static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2722 struct kvm_one_reg *reg)
2723{
2724 int r = -EINVAL;
2725
2726 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002727 case KVM_REG_S390_TODPR:
2728 r = put_user(vcpu->arch.sie_block->todpr,
2729 (u32 __user *)reg->addr);
2730 break;
2731 case KVM_REG_S390_EPOCHDIFF:
2732 r = put_user(vcpu->arch.sie_block->epoch,
2733 (u64 __user *)reg->addr);
2734 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002735 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002736 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002737 (u64 __user *)reg->addr);
2738 break;
2739 case KVM_REG_S390_CLOCK_COMP:
2740 r = put_user(vcpu->arch.sie_block->ckc,
2741 (u64 __user *)reg->addr);
2742 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002743 case KVM_REG_S390_PFTOKEN:
2744 r = put_user(vcpu->arch.pfault_token,
2745 (u64 __user *)reg->addr);
2746 break;
2747 case KVM_REG_S390_PFCOMPARE:
2748 r = put_user(vcpu->arch.pfault_compare,
2749 (u64 __user *)reg->addr);
2750 break;
2751 case KVM_REG_S390_PFSELECT:
2752 r = put_user(vcpu->arch.pfault_select,
2753 (u64 __user *)reg->addr);
2754 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002755 case KVM_REG_S390_PP:
2756 r = put_user(vcpu->arch.sie_block->pp,
2757 (u64 __user *)reg->addr);
2758 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002759 case KVM_REG_S390_GBEA:
2760 r = put_user(vcpu->arch.sie_block->gbea,
2761 (u64 __user *)reg->addr);
2762 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002763 default:
2764 break;
2765 }
2766
2767 return r;
2768}
2769
2770static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2771 struct kvm_one_reg *reg)
2772{
2773 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002774 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002775
2776 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002777 case KVM_REG_S390_TODPR:
2778 r = get_user(vcpu->arch.sie_block->todpr,
2779 (u32 __user *)reg->addr);
2780 break;
2781 case KVM_REG_S390_EPOCHDIFF:
2782 r = get_user(vcpu->arch.sie_block->epoch,
2783 (u64 __user *)reg->addr);
2784 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002785 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002786 r = get_user(val, (u64 __user *)reg->addr);
2787 if (!r)
2788 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002789 break;
2790 case KVM_REG_S390_CLOCK_COMP:
2791 r = get_user(vcpu->arch.sie_block->ckc,
2792 (u64 __user *)reg->addr);
2793 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002794 case KVM_REG_S390_PFTOKEN:
2795 r = get_user(vcpu->arch.pfault_token,
2796 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002797 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2798 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002799 break;
2800 case KVM_REG_S390_PFCOMPARE:
2801 r = get_user(vcpu->arch.pfault_compare,
2802 (u64 __user *)reg->addr);
2803 break;
2804 case KVM_REG_S390_PFSELECT:
2805 r = get_user(vcpu->arch.pfault_select,
2806 (u64 __user *)reg->addr);
2807 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002808 case KVM_REG_S390_PP:
2809 r = get_user(vcpu->arch.sie_block->pp,
2810 (u64 __user *)reg->addr);
2811 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002812 case KVM_REG_S390_GBEA:
2813 r = get_user(vcpu->arch.sie_block->gbea,
2814 (u64 __user *)reg->addr);
2815 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002816 default:
2817 break;
2818 }
2819
2820 return r;
2821}
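/*
 * Illustrative userspace sketch (not part of this file) of how the ONE_REG
 * interface above is typically driven; vcpu_fd is assumed to be an open
 * vcpu file descriptor and the CPU timer register is just one example:
 *
 *	__u64 cputm = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_SET_ONE_REG");
 */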
Christoffer Dallb6d33832012-03-08 16:44:24 -05002822
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002823static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2824{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002825 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002826 return 0;
2827}
2828
2829int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2830{
Christoffer Dall875656f2017-12-04 21:35:27 +01002831 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002832 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01002833 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002834 return 0;
2835}
2836
2837int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2838{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002839 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002840 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002841 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002842 return 0;
2843}
2844
2845int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2846 struct kvm_sregs *sregs)
2847{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01002848 vcpu_load(vcpu);
2849
Christian Borntraeger59674c12012-01-11 11:20:33 +01002850 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002851 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01002852
2853 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002854 return 0;
2855}
2856
2857int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2858 struct kvm_sregs *sregs)
2859{
Christoffer Dallbcdec412017-12-04 21:35:28 +01002860 vcpu_load(vcpu);
2861
Christian Borntraeger59674c12012-01-11 11:20:33 +01002862 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002863 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01002864
2865 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002866 return 0;
2867}
2868
2869int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2870{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01002871 int ret = 0;
2872
2873 vcpu_load(vcpu);
2874
2875 if (test_fp_ctl(fpu->fpc)) {
2876 ret = -EINVAL;
2877 goto out;
2878 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002879 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002880 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002881 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2882 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002883 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002884 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01002885
2886out:
2887 vcpu_put(vcpu);
2888 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002889}
2890
2891int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2892{
Christoffer Dall13931232017-12-04 21:35:34 +01002893 vcpu_load(vcpu);
2894
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002895 /* make sure we have the latest values */
2896 save_fpu_regs();
2897 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002898 convert_vx_to_fp((freg_t *) fpu->fprs,
2899 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002900 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002901 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002902 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01002903
2904 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002905 return 0;
2906}
2907
2908static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2909{
2910 int rc = 0;
2911
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002912 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002913 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002914 else {
2915 vcpu->run->psw_mask = psw.mask;
2916 vcpu->run->psw_addr = psw.addr;
2917 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002918 return rc;
2919}
2920
2921int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2922 struct kvm_translation *tr)
2923{
2924 return -EINVAL; /* not implemented yet */
2925}
2926
David Hildenbrand27291e22014-01-23 12:26:52 +01002927#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2928 KVM_GUESTDBG_USE_HW_BP | \
2929 KVM_GUESTDBG_ENABLE)
2930
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002931int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2932 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002933{
David Hildenbrand27291e22014-01-23 12:26:52 +01002934 int rc = 0;
2935
Christoffer Dall66b56562017-12-04 21:35:33 +01002936 vcpu_load(vcpu);
2937
David Hildenbrand27291e22014-01-23 12:26:52 +01002938 vcpu->guest_debug = 0;
2939 kvm_s390_clear_bp_data(vcpu);
2940
Christoffer Dall66b56562017-12-04 21:35:33 +01002941 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
2942 rc = -EINVAL;
2943 goto out;
2944 }
2945 if (!sclp.has_gpere) {
2946 rc = -EINVAL;
2947 goto out;
2948 }
David Hildenbrand27291e22014-01-23 12:26:52 +01002949
2950 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2951 vcpu->guest_debug = dbg->control;
2952 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002953 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01002954
2955 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2956 rc = kvm_s390_import_bp_data(vcpu, dbg);
2957 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002958 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01002959 vcpu->arch.guestdbg.last_bp = 0;
2960 }
2961
2962 if (rc) {
2963 vcpu->guest_debug = 0;
2964 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002965 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01002966 }
2967
Christoffer Dall66b56562017-12-04 21:35:33 +01002968out:
2969 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01002970 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002971}
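/*
 * Illustrative userspace sketch (not part of this file) of the debug
 * interface above; vcpu_fd is assumed to be an open vcpu file descriptor:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		err(1, "KVM_SET_GUEST_DEBUG");
 */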
2972
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002973int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2974 struct kvm_mp_state *mp_state)
2975{
Christoffer Dallfd232562017-12-04 21:35:30 +01002976 int ret;
2977
2978 vcpu_load(vcpu);
2979
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002980 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01002981 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2982 KVM_MP_STATE_OPERATING;
2983
2984 vcpu_put(vcpu);
2985 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002986}
2987
2988int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2989 struct kvm_mp_state *mp_state)
2990{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002991 int rc = 0;
2992
Christoffer Dalle83dff52017-12-04 21:35:31 +01002993 vcpu_load(vcpu);
2994
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002995 /* user space knows about this interface - let it control the state */
2996 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2997
2998 switch (mp_state->mp_state) {
2999 case KVM_MP_STATE_STOPPED:
3000 kvm_s390_vcpu_stop(vcpu);
3001 break;
3002 case KVM_MP_STATE_OPERATING:
3003 kvm_s390_vcpu_start(vcpu);
3004 break;
3005 case KVM_MP_STATE_LOAD:
3006 case KVM_MP_STATE_CHECK_STOP:
3007 /* fall through - CHECK_STOP and LOAD are not supported yet */
3008 default:
3009 rc = -ENXIO;
3010 }
3011
Christoffer Dalle83dff52017-12-04 21:35:31 +01003012 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003013 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003014}
3015
David Hildenbrand8ad35752014-03-14 11:00:21 +01003016static bool ibs_enabled(struct kvm_vcpu *vcpu)
3017{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003018 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003019}
3020
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003021static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3022{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003023retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003024 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003025 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003026 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003027 /*
3028 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003029 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003030 * This ensures that the ipte instruction for this request has
3031 * already finished. We might race against a second unmapper that
 3032	 * wants to set the blocking bit. Let's just retry the request loop.
3033 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003034 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003035 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003036 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3037 kvm_s390_get_prefix(vcpu),
3038 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003039 if (rc) {
3040 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003041 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003042 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003043 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003044 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003045
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003046 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3047 vcpu->arch.sie_block->ihcpu = 0xffff;
3048 goto retry;
3049 }
3050
David Hildenbrand8ad35752014-03-14 11:00:21 +01003051 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3052 if (!ibs_enabled(vcpu)) {
3053 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003054 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003055 }
3056 goto retry;
3057 }
3058
3059 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3060 if (ibs_enabled(vcpu)) {
3061 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003062 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003063 }
3064 goto retry;
3065 }
3066
David Hildenbrand6502a342016-06-21 14:19:51 +02003067 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3068 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3069 goto retry;
3070 }
3071
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003072 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3073 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003074 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003075 * instruction manually, in order to provide additional
3076 * functionalities needed for live migration.
3077 */
3078 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3079 goto retry;
3080 }
3081
3082 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3083 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003084 * Re-enable CMM virtualization if CMMA is available and
3085 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003086 */
3087 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003088 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003089 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3090 goto retry;
3091 }
3092
David Hildenbrand0759d062014-05-13 16:54:32 +02003093 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003094 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003095
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003096 return 0;
3097}
3098
David Hildenbrand0e7def52018-02-07 12:46:43 +01003099void kvm_s390_set_tod_clock(struct kvm *kvm,
3100 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003101{
3102 struct kvm_vcpu *vcpu;
3103 struct kvm_s390_tod_clock_ext htod;
3104 int i;
3105
3106 mutex_lock(&kvm->lock);
3107 preempt_disable();
3108
3109 get_tod_clock_ext((char *)&htod);
3110
3111 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003112 kvm->arch.epdx = 0;
3113 if (test_kvm_facility(kvm, 139)) {
3114 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
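		/* borrow from the epoch index if the 64-bit TOD subtraction wrapped */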
3115 if (kvm->arch.epoch > gtod->tod)
3116 kvm->arch.epdx -= 1;
3117 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003118
3119 kvm_s390_vcpu_block_all(kvm);
3120 kvm_for_each_vcpu(i, vcpu, kvm) {
3121 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3122 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3123 }
3124
3125 kvm_s390_vcpu_unblock_all(kvm);
3126 preempt_enable();
3127 mutex_unlock(&kvm->lock);
3128}
3129
Thomas Huthfa576c52014-05-06 17:20:16 +02003130/**
3131 * kvm_arch_fault_in_page - fault-in guest page if necessary
3132 * @vcpu: The corresponding virtual cpu
3133 * @gpa: Guest physical address
3134 * @writable: Whether the page should be writable or not
3135 *
3136 * Make sure that a guest page has been faulted-in on the host.
3137 *
3138 * Return: Zero on success, negative error code otherwise.
3139 */
3140long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003141{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003142 return gmap_fault(vcpu->arch.gmap, gpa,
3143 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003144}
3145
Dominik Dingel3c038e62013-10-07 17:11:48 +02003146static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3147 unsigned long token)
3148{
3149 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003150 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003151
3152 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003153 irq.u.ext.ext_params2 = token;
3154 irq.type = KVM_S390_INT_PFAULT_INIT;
3155 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003156 } else {
3157 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003158 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003159 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3160 }
3161}
3162
3163void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3164 struct kvm_async_pf *work)
3165{
3166 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3167 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3168}
3169
3170void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3171 struct kvm_async_pf *work)
3172{
3173 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3174 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3175}
3176
3177void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3178 struct kvm_async_pf *work)
3179{
3180 /* s390 will always inject the page directly */
3181}
3182
3183bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3184{
3185 /*
3186 * s390 will always inject the page directly,
 3187	 * but we still want check_async_completion to clean up
3188 */
3189 return true;
3190}
3191
3192static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3193{
3194 hva_t hva;
3195 struct kvm_arch_async_pf arch;
3196 int rc;
3197
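	/*
	 * Hand the fault to the async page fault mechanism only if guest
	 * pfault handling is armed (valid token, PSW matching the configured
	 * mask/compare values) and the guest can take the PFAULT_INIT
	 * external interrupt right now; otherwise fall back to a synchronous
	 * fault-in.
	 */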
3198 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3199 return 0;
3200 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3201 vcpu->arch.pfault_compare)
3202 return 0;
3203 if (psw_extint_disabled(vcpu))
3204 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003205 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003206 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003207 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003208 return 0;
3209 if (!vcpu->arch.gmap->pfault_enabled)
3210 return 0;
3211
Heiko Carstens81480cc2014-01-01 16:36:07 +01003212 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3213 hva += current->thread.gmap_addr & ~PAGE_MASK;
3214 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003215 return 0;
3216
3217 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3218 return rc;
3219}
3220
Thomas Huth3fb4c402013-09-12 10:33:43 +02003221static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003222{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003223 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003224
Dominik Dingel3c038e62013-10-07 17:11:48 +02003225 /*
3226 * On s390 notifications for arriving pages will be delivered directly
 3227	 * to the guest, but the housekeeping for completed pfaults is
3228 * handled outside the worker.
3229 */
3230 kvm_check_async_pf_completion(vcpu);
3231
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003232 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3233 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003234
3235 if (need_resched())
3236 schedule();
3237
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003238 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003239 s390_handle_mcck();
3240
Jens Freimann79395032014-04-17 10:10:30 +02003241 if (!kvm_is_ucontrol(vcpu->kvm)) {
3242 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3243 if (rc)
3244 return rc;
3245 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003246
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003247 rc = kvm_s390_handle_requests(vcpu);
3248 if (rc)
3249 return rc;
3250
David Hildenbrand27291e22014-01-23 12:26:52 +01003251 if (guestdbg_enabled(vcpu)) {
3252 kvm_s390_backup_guest_per_regs(vcpu);
3253 kvm_s390_patch_guest_per_regs(vcpu);
3254 }
3255
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003256 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003257 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3258 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3259 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003260
Thomas Huth3fb4c402013-09-12 10:33:43 +02003261 return 0;
3262}
3263
Thomas Huth492d8642015-02-10 16:11:01 +01003264static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3265{
David Hildenbrand56317922016-01-12 17:37:58 +01003266 struct kvm_s390_pgm_info pgm_info = {
3267 .code = PGM_ADDRESSING,
3268 };
3269 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003270 int rc;
3271
3272 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3273 trace_kvm_s390_sie_fault(vcpu);
3274
3275 /*
3276 * We want to inject an addressing exception, which is defined as a
3277 * suppressing or terminating exception. However, since we came here
3278 * by a DAT access exception, the PSW still points to the faulting
3279 * instruction since DAT exceptions are nullifying. So we've got
3280 * to look up the current opcode to get the length of the instruction
3281 * to be able to forward the PSW.
3282 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003283 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003284 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003285 if (rc < 0) {
3286 return rc;
3287 } else if (rc) {
3288 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3289 * Forward by arbitrary ilc, injection will take care of
3290 * nullification if necessary.
3291 */
3292 pgm_info = vcpu->arch.pgm;
3293 ilen = 4;
3294 }
David Hildenbrand56317922016-01-12 17:37:58 +01003295 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3296 kvm_s390_forward_psw(vcpu, ilen);
3297 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003298}
3299
Thomas Huth3fb4c402013-09-12 10:33:43 +02003300static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3301{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003302 struct mcck_volatile_info *mcck_info;
3303 struct sie_page *sie_page;
3304
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003305 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3306 vcpu->arch.sie_block->icptcode);
3307 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3308
David Hildenbrand27291e22014-01-23 12:26:52 +01003309 if (guestdbg_enabled(vcpu))
3310 kvm_s390_restore_guest_per_regs(vcpu);
3311
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003312 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3313 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003314
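	/*
	 * A machine check while running the SIE instruction makes sie64a()
	 * return -EINTR; the volatile machine check info was stashed in the
	 * sie_page by the low-level handler and is reinjected into the guest
	 * here.
	 */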
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003315 if (exit_reason == -EINTR) {
3316 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3317 sie_page = container_of(vcpu->arch.sie_block,
3318 struct sie_page, sie_block);
3319 mcck_info = &sie_page->mcck_info;
3320 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3321 return 0;
3322 }
3323
David Hildenbrand71f116b2015-10-19 16:24:28 +02003324 if (vcpu->arch.sie_block->icptcode > 0) {
3325 int rc = kvm_handle_sie_intercept(vcpu);
3326
3327 if (rc != -EOPNOTSUPP)
3328 return rc;
3329 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3330 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3331 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3332 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3333 return -EREMOTE;
3334 } else if (exit_reason != -EFAULT) {
3335 vcpu->stat.exit_null++;
3336 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003337 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3338 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3339 vcpu->run->s390_ucontrol.trans_exc_code =
3340 current->thread.gmap_addr;
3341 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003342 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003343 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003344 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003345 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003346 if (kvm_arch_setup_async_pf(vcpu))
3347 return 0;
3348 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003349 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003350 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003351}
3352
3353static int __vcpu_run(struct kvm_vcpu *vcpu)
3354{
3355 int rc, exit_reason;
3356
Thomas Huth800c1062013-09-12 10:33:45 +02003357 /*
3358 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3359 * ning the guest), so that memslots (and other stuff) are protected
3360 */
3361 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3362
Thomas Hutha76ccff2013-09-12 10:33:44 +02003363 do {
3364 rc = vcpu_pre_run(vcpu);
3365 if (rc)
3366 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003367
Thomas Huth800c1062013-09-12 10:33:45 +02003368 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003369 /*
 3370		 * As PF_VCPU will be used in the fault handler, there must be no
 3371		 * uaccess between guest_enter and guest_exit.
3372 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003373 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003374 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003375 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003376 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003377 exit_reason = sie64a(vcpu->arch.sie_block,
3378 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003379 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003380 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003381 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003382 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003383 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003384
Thomas Hutha76ccff2013-09-12 10:33:44 +02003385 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003386 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003387
Thomas Huth800c1062013-09-12 10:33:45 +02003388 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003389 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003390}
3391
David Hildenbrandb028ee32014-07-17 10:47:43 +02003392static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3393{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003394 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003395 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003396
3397 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003398 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003399 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3400 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3401 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3402 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3403 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3404 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003405 /* some control register changes require a tlb flush */
3406 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003407 }
3408 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003409 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003410 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3411 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3412 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3413 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3414 }
3415 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3416 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3417 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3418 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003419 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3420 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003421 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003422 /*
3423 * If userspace sets the riccb (e.g. after migration) to a valid state,
3424 * we should enable RI here instead of doing the lazy enablement.
3425 */
3426 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003427 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003428 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003429 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003430 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003431 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003432 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003433 /*
3434 * If userspace sets the gscb (e.g. after migration) to non-zero,
3435 * we should enable GS here instead of doing the lazy enablement.
3436 */
3437 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3438 test_kvm_facility(vcpu->kvm, 133) &&
3439 gscb->gssm &&
3440 !vcpu->arch.gs_enabled) {
3441 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3442 vcpu->arch.sie_block->ecb |= ECB_GS;
3443 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3444 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003445 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003446 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3447 test_kvm_facility(vcpu->kvm, 82)) {
3448 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3449 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3450 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003451 save_access_regs(vcpu->arch.host_acrs);
3452 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003453 /* save host (userspace) fprs/vrs */
3454 save_fpu_regs();
3455 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3456 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3457 if (MACHINE_HAS_VX)
3458 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3459 else
3460 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3461 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3462 if (test_fp_ctl(current->thread.fpu.fpc))
3463 /* User space provided an invalid FPC, let's clear it */
3464 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003465 if (MACHINE_HAS_GS) {
3466 preempt_disable();
3467 __ctl_set_bit(2, 4);
3468 if (current->thread.gs_cb) {
3469 vcpu->arch.host_gscb = current->thread.gs_cb;
3470 save_gs_cb(vcpu->arch.host_gscb);
3471 }
3472 if (vcpu->arch.gs_enabled) {
3473 current->thread.gs_cb = (struct gs_cb *)
3474 &vcpu->run->s.regs.gscb;
3475 restore_gs_cb(current->thread.gs_cb);
3476 }
3477 preempt_enable();
3478 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003479
David Hildenbrandb028ee32014-07-17 10:47:43 +02003480 kvm_run->kvm_dirty_regs = 0;
3481}
3482
3483static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3484{
3485 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3486 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3487 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3488 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003489 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003490 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3491 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3492 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3493 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3494 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3495 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3496 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003497 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003498 save_access_regs(vcpu->run->s.regs.acrs);
3499 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003500 /* Save guest register state */
3501 save_fpu_regs();
3502 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3503 /* Restore will be done lazily at return */
3504 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3505 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003506 if (MACHINE_HAS_GS) {
3507 __ctl_set_bit(2, 4);
3508 if (vcpu->arch.gs_enabled)
3509 save_gs_cb(current->thread.gs_cb);
3510 preempt_disable();
3511 current->thread.gs_cb = vcpu->arch.host_gscb;
3512 restore_gs_cb(vcpu->arch.host_gscb);
3513 preempt_enable();
3514 if (!vcpu->arch.host_gscb)
3515 __ctl_clear_bit(2, 4);
3516 vcpu->arch.host_gscb = NULL;
3517 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003518
David Hildenbrandb028ee32014-07-17 10:47:43 +02003519}
3520
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003521int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3522{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003523 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003524
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003525 if (kvm_run->immediate_exit)
3526 return -EINTR;
3527
Christoffer Dallaccb7572017-12-04 21:35:25 +01003528 vcpu_load(vcpu);
3529
David Hildenbrand27291e22014-01-23 12:26:52 +01003530 if (guestdbg_exit_pending(vcpu)) {
3531 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003532 rc = 0;
3533 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01003534 }
3535
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003536 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003537
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003538 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3539 kvm_s390_vcpu_start(vcpu);
3540 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003541 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003542 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003543 rc = -EINVAL;
3544 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003545 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003546
David Hildenbrandb028ee32014-07-17 10:47:43 +02003547 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003548 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003549
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003550 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003551 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003552
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003553 if (signal_pending(current) && !rc) {
3554 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003555 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003556 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003557
David Hildenbrand27291e22014-01-23 12:26:52 +01003558 if (guestdbg_exit_pending(vcpu) && !rc) {
3559 kvm_s390_prepare_debug_exit(vcpu);
3560 rc = 0;
3561 }
3562
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003563 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003564 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003565 rc = 0;
3566 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003567
David Hildenbranddb0758b2016-02-15 09:42:25 +01003568 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003569 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003570
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003571 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003572
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003573 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01003574out:
3575 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003576 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003577}
3578
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003579/*
3580 * store status at address
 3581 * we have two special cases:
3582 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3583 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3584 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003585int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003586{
Carsten Otte092670c2011-07-24 10:48:22 +02003587 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003588 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003589 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003590 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003591 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003592
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003593 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003594 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3595 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003596 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003597 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003598 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3599 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003600 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003601 gpa = px;
3602 } else
3603 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003604
3605 /* manually convert vector registers if necessary */
3606 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003607 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003608 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3609 fprs, 128);
3610 } else {
3611 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003612 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003613 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003614 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003615 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003616 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003617 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003618 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003619 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003620 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003621 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003622 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003623 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003624 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003625 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003626 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003627 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003628 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003629 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003630 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003631 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003632 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003633 &vcpu->arch.sie_block->gcr, 128);
3634 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003635}
3636
Thomas Huthe8798922013-11-06 15:46:33 +01003637int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3638{
3639 /*
3640 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003641 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003642	 * them into the save area
3643 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003644 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003645 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003646 save_access_regs(vcpu->run->s.regs.acrs);
3647
3648 return kvm_s390_store_status_unloaded(vcpu, addr);
3649}
3650
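/*
 * Illustrative userspace sketch (an assumption for this document, not part
 * of the kernel sources): the store-status path above is reached through
 * the KVM_S390_STORE_STATUS vcpu ioctl, which takes a guest absolute
 * address (or one of the special NOADDR/PREFIXED values handled in
 * kvm_s390_store_status_unloaded()) as its argument.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int store_status_at(int vcpu_fd, unsigned long guest_abs_addr)
 *	{
 *		return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, guest_abs_addr);
 *	}
 */
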
David Hildenbrand8ad35752014-03-14 11:00:21 +01003651static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3652{
3653 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003654 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003655}
3656
3657static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3658{
3659 unsigned int i;
3660 struct kvm_vcpu *vcpu;
3661
3662 kvm_for_each_vcpu(i, vcpu, kvm) {
3663 __disable_ibs_on_vcpu(vcpu);
3664 }
3665}
3666
3667static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3668{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003669 if (!sclp.has_ibs)
3670 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003671 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003672 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003673}
3674
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003675void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3676{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003677 int i, online_vcpus, started_vcpus = 0;
3678
3679 if (!is_vcpu_stopped(vcpu))
3680 return;
3681
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003682 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003683 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003684 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003685 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3686
3687 for (i = 0; i < online_vcpus; i++) {
3688 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3689 started_vcpus++;
3690 }
3691
3692 if (started_vcpus == 0) {
3693 /* we're the only active VCPU -> speed it up */
3694 __enable_ibs_on_vcpu(vcpu);
3695 } else if (started_vcpus == 1) {
3696 /*
3697 * As we are starting a second VCPU, we have to disable
3698 * the IBS facility on all VCPUs to remove potentially
3699	 * outstanding ENABLE requests.
3700 */
3701 __disable_ibs_on_all_vcpus(vcpu->kvm);
3702 }
3703
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003704 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003705 /*
3706 * Another VCPU might have used IBS while we were offline.
3707 * Let's play safe and flush the VCPU at startup.
3708 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003709 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003710 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003711 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003712}
3713
3714void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3715{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003716 int i, online_vcpus, started_vcpus = 0;
3717 struct kvm_vcpu *started_vcpu = NULL;
3718
3719 if (is_vcpu_stopped(vcpu))
3720 return;
3721
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003722 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003723 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003724 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003725 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3726
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003727	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003728 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003729
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003730 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003731 __disable_ibs_on_vcpu(vcpu);
3732
3733 for (i = 0; i < online_vcpus; i++) {
3734 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3735 started_vcpus++;
3736 started_vcpu = vcpu->kvm->vcpus[i];
3737 }
3738 }
3739
3740 if (started_vcpus == 1) {
3741 /*
3742 * As we only have one VCPU left, we want to enable the
3743 * IBS facility for that VCPU to speed it up.
3744 */
3745 __enable_ibs_on_vcpu(started_vcpu);
3746 }
3747
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003748 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003749 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003750}
3751
Cornelia Huckd6712df2012-12-20 15:32:11 +01003752static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3753 struct kvm_enable_cap *cap)
3754{
3755 int r;
3756
3757 if (cap->flags)
3758 return -EINVAL;
3759
3760 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003761 case KVM_CAP_S390_CSS_SUPPORT:
3762 if (!vcpu->kvm->arch.css_support) {
3763 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003764 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003765 trace_kvm_s390_enable_css(vcpu->kvm);
3766 }
3767 r = 0;
3768 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003769 default:
3770 r = -EINVAL;
3771 break;
3772 }
3773 return r;
3774}
3775
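/*
 * Hedged userspace sketch (assumed helper, for illustration only): the one
 * per-vcpu capability handled above, KVM_CAP_S390_CSS_SUPPORT, is enabled
 * through the KVM_ENABLE_CAP vcpu ioctl; cap->flags must be zero or the
 * handler returns -EINVAL.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int enable_css_support(int vcpu_fd)
 *	{
 *		struct kvm_enable_cap cap;
 *
 *		memset(&cap, 0, sizeof(cap));
 *		cap.cap = KVM_CAP_S390_CSS_SUPPORT;
 *		return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *	}
 */
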
Thomas Huth41408c282015-02-06 15:01:21 +01003776static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3777 struct kvm_s390_mem_op *mop)
3778{
3779 void __user *uaddr = (void __user *)mop->buf;
3780 void *tmpbuf = NULL;
3781 int r, srcu_idx;
3782 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3783 | KVM_S390_MEMOP_F_CHECK_ONLY;
3784
3785 if (mop->flags & ~supported_flags)
3786 return -EINVAL;
3787
3788 if (mop->size > MEM_OP_MAX_SIZE)
3789 return -E2BIG;
3790
3791 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3792 tmpbuf = vmalloc(mop->size);
3793 if (!tmpbuf)
3794 return -ENOMEM;
3795 }
3796
3797 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3798
3799 switch (mop->op) {
3800 case KVM_S390_MEMOP_LOGICAL_READ:
3801 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003802 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3803 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003804 break;
3805 }
3806 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3807 if (r == 0) {
3808 if (copy_to_user(uaddr, tmpbuf, mop->size))
3809 r = -EFAULT;
3810 }
3811 break;
3812 case KVM_S390_MEMOP_LOGICAL_WRITE:
3813 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003814 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3815 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003816 break;
3817 }
3818 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3819 r = -EFAULT;
3820 break;
3821 }
3822 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3823 break;
3824 default:
3825 r = -EINVAL;
3826 }
3827
3828 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3829
3830 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3831 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3832
3833 vfree(tmpbuf);
3834 return r;
3835}
3836
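/*
 * Illustrative userspace sketch (assumed, not part of this file): reading
 * guest memory through the KVM_S390_MEM_OP vcpu ioctl served by
 * kvm_s390_guest_mem_op() above. Transfers larger than MEM_OP_MAX_SIZE
 * (64k) are rejected with -E2BIG.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
 *	{
 *		struct kvm_s390_mem_op op;
 *
 *		memset(&op, 0, sizeof(op));
 *		op.gaddr = gaddr;
 *		op.size = len;
 *		op.op = KVM_S390_MEMOP_LOGICAL_READ;
 *		op.buf = (__u64)(unsigned long)buf;
 *		op.ar = 0;
 *		return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *	}
 */
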
Paolo Bonzini5cb09442017-12-12 17:41:34 +01003837long kvm_arch_vcpu_async_ioctl(struct file *filp,
3838 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003839{
3840 struct kvm_vcpu *vcpu = filp->private_data;
3841 void __user *argp = (void __user *)arg;
3842
Avi Kivity93736622010-05-13 12:35:17 +03003843 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003844 case KVM_S390_IRQ: {
3845 struct kvm_s390_irq s390irq;
3846
Jens Freimann47b43c52014-11-11 20:57:06 +01003847 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01003848 return -EFAULT;
3849 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01003850 }
Avi Kivity93736622010-05-13 12:35:17 +03003851 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003852 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003853 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003854
3855 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01003856 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02003857 if (s390int_to_s390irq(&s390int, &s390irq))
3858 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01003859 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003860 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01003861 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01003862 return -ENOIOCTLCMD;
3863}
3864
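/*
 * Hypothetical userspace sketch (names assumed): injecting an interrupt on
 * the async ioctl path above, which the common KVM code calls without
 * taking the vcpu mutex, so injection does not have to wait for a vcpu
 * that is currently inside KVM_RUN. KVM_S390_RESTART needs no payload
 * beyond the type, which keeps the example minimal.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int inject_restart(int vcpu_fd)
 *	{
 *		struct kvm_s390_irq irq;
 *
 *		memset(&irq, 0, sizeof(irq));
 *		irq.type = KVM_S390_RESTART;
 *		return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 *	}
 */
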
3865long kvm_arch_vcpu_ioctl(struct file *filp,
3866 unsigned int ioctl, unsigned long arg)
3867{
3868 struct kvm_vcpu *vcpu = filp->private_data;
3869 void __user *argp = (void __user *)arg;
3870 int idx;
3871 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01003872
3873 vcpu_load(vcpu);
3874
3875 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003876 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003877 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003878 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003879 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003880 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003881 case KVM_S390_SET_INITIAL_PSW: {
3882 psw_t psw;
3883
Avi Kivitybc923cc2010-05-13 12:21:46 +03003884 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003885 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003886 break;
3887 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3888 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003889 }
3890 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003891 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3892 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003893 case KVM_SET_ONE_REG:
3894 case KVM_GET_ONE_REG: {
3895 struct kvm_one_reg reg;
3896 r = -EFAULT;
3897 if (copy_from_user(&reg, argp, sizeof(reg)))
3898 break;
3899 if (ioctl == KVM_SET_ONE_REG)
3900 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3901 else
3902 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3903 break;
3904 }
Carsten Otte27e03932012-01-04 10:25:21 +01003905#ifdef CONFIG_KVM_S390_UCONTROL
3906 case KVM_S390_UCAS_MAP: {
3907 struct kvm_s390_ucas_mapping ucasmap;
3908
3909 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3910 r = -EFAULT;
3911 break;
3912 }
3913
3914 if (!kvm_is_ucontrol(vcpu->kvm)) {
3915 r = -EINVAL;
3916 break;
3917 }
3918
3919 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3920 ucasmap.vcpu_addr, ucasmap.length);
3921 break;
3922 }
3923 case KVM_S390_UCAS_UNMAP: {
3924 struct kvm_s390_ucas_mapping ucasmap;
3925
3926 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3927 r = -EFAULT;
3928 break;
3929 }
3930
3931 if (!kvm_is_ucontrol(vcpu->kvm)) {
3932 r = -EINVAL;
3933 break;
3934 }
3935
3936 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3937 ucasmap.length);
3938 break;
3939 }
3940#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003941 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003942 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003943 break;
3944 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003945 case KVM_ENABLE_CAP:
3946 {
3947 struct kvm_enable_cap cap;
3948 r = -EFAULT;
3949 if (copy_from_user(&cap, argp, sizeof(cap)))
3950 break;
3951 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3952 break;
3953 }
Thomas Huth41408c282015-02-06 15:01:21 +01003954 case KVM_S390_MEM_OP: {
3955 struct kvm_s390_mem_op mem_op;
3956
3957 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3958 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3959 else
3960 r = -EFAULT;
3961 break;
3962 }
Jens Freimann816c7662014-11-24 17:13:46 +01003963 case KVM_S390_SET_IRQ_STATE: {
3964 struct kvm_s390_irq_state irq_state;
3965
3966 r = -EFAULT;
3967 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3968 break;
3969 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3970 irq_state.len == 0 ||
3971 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3972 r = -EINVAL;
3973 break;
3974 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003975 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003976 r = kvm_s390_set_irq_state(vcpu,
3977 (void __user *) irq_state.buf,
3978 irq_state.len);
3979 break;
3980 }
3981 case KVM_S390_GET_IRQ_STATE: {
3982 struct kvm_s390_irq_state irq_state;
3983
3984 r = -EFAULT;
3985 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3986 break;
3987 if (irq_state.len == 0) {
3988 r = -EINVAL;
3989 break;
3990 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003991 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01003992 r = kvm_s390_get_irq_state(vcpu,
3993 (__u8 __user *) irq_state.buf,
3994 irq_state.len);
3995 break;
3996 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003997 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003998 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003999 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004000
4001 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004002 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004003}
4004
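/*
 * Assumed userspace sketch: saving the pending interrupt state via the
 * KVM_S390_GET_IRQ_STATE case above, e.g. for migration. The ioctl's
 * return value is whatever kvm_s390_get_irq_state() hands back, so callers
 * should treat non-negative values as success.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int save_irq_state(int vcpu_fd, void *buf, __u32 len)
 *	{
 *		struct kvm_s390_irq_state irq_state = {
 *			.buf = (__u64)(unsigned long)buf,
 *			.len = len,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 *	}
 */
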
Carsten Otte5b1c1492012-01-04 10:25:23 +01004005int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
4006{
4007#ifdef CONFIG_KVM_S390_UCONTROL
4008 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4009 && (kvm_is_ucontrol(vcpu->kvm))) {
4010 vmf->page = virt_to_page(vcpu->arch.sie_block);
4011 get_page(vmf->page);
4012 return 0;
4013 }
4014#endif
4015 return VM_FAULT_SIGBUS;
4016}
4017
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304018int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4019 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004020{
4021 return 0;
4022}
4023
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004024/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004025int kvm_arch_prepare_memory_region(struct kvm *kvm,
4026 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004027 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004028 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004029{
Nick Wangdd2887e2013-03-25 17:22:57 +01004030	/* A few sanity checks. Memory slots have to start and end on a
 4031	   segment boundary (1MB). The memory in userland may be fragmented
 4032	   into various different vmas, and it is fine to mmap() and munmap()
 4033	   parts of this slot at any time after this call */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004034
Carsten Otte598841c2011-07-24 10:48:21 +02004035 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004036 return -EINVAL;
4037
Carsten Otte598841c2011-07-24 10:48:21 +02004038 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004039 return -EINVAL;
4040
Dominik Dingela3a92c32014-12-01 17:24:42 +01004041 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4042 return -EINVAL;
4043
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004044 return 0;
4045}
4046
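/*
 * Illustrative sketch (assumed helper name): a slot that passes the sanity
 * checks above needs a 1 MB aligned userspace address and size, and
 * guest_phys_addr + memory_size must stay within kvm->arch.mem_limit.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int add_segment_aligned_slot(int vm_fd, __u32 slot, __u64 gpa,
 *				     __u64 size, void *hva)
 *	{
 *		struct kvm_userspace_memory_region region = {
 *			.slot = slot,
 *			.guest_phys_addr = gpa,
 *			.memory_size = size,
 *			.userspace_addr = (__u64)(unsigned long)hva,
 *		};
 *
 *		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *	}
 */
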
4047void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004048 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004049 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004050 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004051 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004052{
Carsten Ottef7850c92011-07-24 10:48:23 +02004053 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004054
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01004055 /* If the basics of the memslot do not change, we do not want
4056 * to update the gmap. Every update causes several unnecessary
4057 * segment translation exceptions. This is usually handled just
4058 * fine by the normal fault handler + gmap, but it will also
4059 * cause faults on the prefix page of running guest CPUs.
4060 */
4061 if (old->userspace_addr == mem->userspace_addr &&
4062 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
4063 old->npages * PAGE_SIZE == mem->memory_size)
4064 return;
Carsten Otte598841c2011-07-24 10:48:21 +02004065
4066 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4067 mem->guest_phys_addr, mem->memory_size);
4068 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004069 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004070 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004071}
4072
Alexander Yarygin60a37702016-04-01 15:38:57 +03004073static inline unsigned long nonhyp_mask(int i)
4074{
4075 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4076
4077 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4078}
4079
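/*
 * Worked example of the arithmetic above (derived from the code itself):
 * the two-bit field extracted from sclp.hmfai for facility word i shifts
 * the 0x0000ffffffffffffUL base mask right in 16-bit steps:
 *
 *	nonhyp_fai == 0  ->  0x0000ffffffffffffUL
 *	nonhyp_fai == 1  ->  0x00000000ffffffffUL
 *	nonhyp_fai == 2  ->  0x000000000000ffffUL
 *	nonhyp_fai == 3  ->  0x0000000000000000UL
 */
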
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004080void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4081{
4082 vcpu->valid_wakeup = false;
4083}
4084
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004085static int __init kvm_s390_init(void)
4086{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004087 int i;
4088
David Hildenbrand07197fd2015-01-30 16:01:38 +01004089 if (!sclp.has_sief2) {
4090 pr_info("SIE not available\n");
4091 return -ENODEV;
4092 }
4093
Alexander Yarygin60a37702016-04-01 15:38:57 +03004094 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004095 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004096 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4097
Michael Mueller9d8d5782015-02-02 15:42:51 +01004098 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004099}
4100
4101static void __exit kvm_s390_exit(void)
4102{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004103 kvm_exit();
4104}
4105
4106module_init(kvm_s390_init);
4107module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004108
4109/*
4110 * Enable autoloading of the kvm module.
4111 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4112 * since x86 takes a different approach.
4113 */
4114#include <linux/miscdevice.h>
4115MODULE_ALIAS_MISCDEV(KVM_MINOR);
4116MODULE_ALIAS("devname:kvm");