// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;
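
/*
 * Layout note (a sketch, not authoritative architecture documentation):
 * this mirrors the 16 bytes stored by STORE CLOCK EXTENDED - one
 * epoch-index byte, the 64-bit TOD value and reserved bytes - hence
 * the __packed attribute. A hypothetical compile-time check would be:
 *
 *	BUILD_BUG_ON(sizeof(struct kvm_s390_tod_clock_ext) != 16);
 */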

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
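
/*
 * Usage sketch (hypothetical command lines, not part of this file):
 * both knobs are module parameters, so they are given at load time,
 * e.g. "modprobe kvm nested=1" or "modprobe kvm hpage=1". Per the
 * comments above, hpage is only useful while nested stays off.
 */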

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it will require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines the default mask for facilities. Consists of
 * the defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}
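
/*
 * Sizing note: SIZE_INTERNAL doublewords hold SIZE_INTERNAL * 64
 * facility bits, i.e. 16 * 64 = 1024 bits with the current setting.
 * The BUILD_BUG_ONs above only guard the internal buffers; as noted
 * above SIZE_INTERNAL, the externally visible uapi sizes can stay
 * unchanged even if this constant grows.
 */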

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta; we have to compensate by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
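
/*
 * A worked example for the carry logic above, with hypothetical values:
 * assume the TOD was stepped forward by 1, so after negation
 * delta = 0xffffffffffffffff and delta_idx = -1 (sign extension).
 *
 * - epoch = 0: 0 + delta = 0xffffffffffffffff with no unsigned
 *   overflow (the result is not < delta), so only delta_idx is added:
 *   epdx drops by one, a borrow, as expected for 0 - 1.
 * - epoch = 1: 1 + delta wraps to 0, which is < delta, so the carry
 *   cancels the sign extension: epdx += -1 + 1 = 0, as expected for
 *   1 - 1.
 */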

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
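
/*
 * How the query above works (a sketch based on the PLO definition):
 * with bit 0x100 set in r0, PERFORM LOCKED OPERATION does not perform
 * an operation but only tests whether function code 'nr' is installed,
 * reporting the result in the condition code. Callers simply probe all
 * 256 possible function codes, e.g.:
 *
 *	if (plo_test_bit(0))
 *		...	function code 0 is available
 */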

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
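
/*
 * Granularity note with a small worked example (numbers illustrative):
 * _PAGE_ENTRIES is 256, so each iteration above syncs one 1 MB segment
 * (256 * 4 KB pages) with a single gmap_sync_dirty_log_pmd() call; a
 * 1 GB memslot is therefore covered in 1024 iterations, kept
 * preemptible via cond_resched().
 */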

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To prevent the hardware from working
			 * on stale PGSTEs, we emulate these
			 * instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}
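
/*
 * Userspace view (a hedged sketch, not taken from this file): the TOD
 * attributes are driven through KVM_SET_DEVICE_ATTR on the VM fd,
 * roughly:
 *
 *	struct kvm_s390_vm_tod_clock gtod = { .epoch_idx = 0, .tod = tod };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr = KVM_S390_VM_TOD_EXT,
 *		.addr = (__u64)(unsigned long)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */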

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
1144
Michael Mueller658b6ed2015-02-02 15:49:35 +01001145static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1146{
1147 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001148 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001149 int ret = 0;
1150
1151 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001152 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001153 ret = -EBUSY;
1154 goto out;
1155 }
1156 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1157 if (!proc) {
1158 ret = -ENOMEM;
1159 goto out;
1160 }
1161 if (!copy_from_user(proc, (void __user *)attr->addr,
1162 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001163 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001164 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1165 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001166 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001167 if (proc->ibc > unblocked_ibc)
1168 kvm->arch.model.ibc = unblocked_ibc;
1169 else if (proc->ibc < lowest_ibc)
1170 kvm->arch.model.ibc = lowest_ibc;
1171 else
1172 kvm->arch.model.ibc = proc->ibc;
1173 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001174 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001175 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001176 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1177 kvm->arch.model.ibc,
1178 kvm->arch.model.cpuid);
1179 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1180 kvm->arch.model.fac_list[0],
1181 kvm->arch.model.fac_list[1],
1182 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001183 } else
1184 ret = -EFAULT;
1185 kfree(proc);
1186out:
1187 mutex_unlock(&kvm->lock);
1188 return ret;
1189}
1190
David Hildenbrand15c97052015-03-19 17:36:43 +01001191static int kvm_s390_set_processor_feat(struct kvm *kvm,
1192 struct kvm_device_attr *attr)
1193{
1194 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001195
1196 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1197 return -EFAULT;
1198 if (!bitmap_subset((unsigned long *) data.feat,
1199 kvm_s390_available_cpu_feat,
1200 KVM_S390_VM_CPU_FEAT_NR_BITS))
1201 return -EINVAL;
1202
1203 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001204 if (kvm->created_vcpus) {
1205 mutex_unlock(&kvm->lock);
1206 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001207 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001208 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1209 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001210 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001211 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1212 data.feat[0],
1213 data.feat[1],
1214 data.feat[2]);
1215 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001216}
1217
David Hildenbrand0a763c72016-05-18 16:03:47 +02001218static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1219 struct kvm_device_attr *attr)
1220{
1221 /*
1222 * Once supported by kernel + hw, we have to store the subfunctions
1223 * in kvm->arch and remember that user space configured them.
1224 */
1225 return -ENXIO;
1226}
1227
Michael Mueller658b6ed2015-02-02 15:49:35 +01001228static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1229{
1230 int ret = -ENXIO;
1231
1232 switch (attr->attr) {
1233 case KVM_S390_VM_CPU_PROCESSOR:
1234 ret = kvm_s390_set_processor(kvm, attr);
1235 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001236 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1237 ret = kvm_s390_set_processor_feat(kvm, attr);
1238 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001239 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1240 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1241 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001242 }
1243 return ret;
1244}
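/*
 * Illustrative sketch, not part of the kernel source: userspace reaches
 * kvm_s390_set_cpu_model() via KVM_SET_DEVICE_ATTR on the VM file
 * descriptor ("vm_fd" is assumed to be an open VM fd, "proc" is assumed
 * to be filled beforehand, e.g. from a KVM_S390_VM_CPU_MACHINE query;
 * error handling trimmed):
 *
 *	struct kvm_s390_vm_cpu_processor proc = { 0 };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_PROCESSOR,
 *		.addr  = (__u64)&proc,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * This must happen before the first vCPU is created, or the handler above
 * fails with -EBUSY.
 */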
1245
1246static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1247{
1248 struct kvm_s390_vm_cpu_processor *proc;
1249 int ret = 0;
1250
1251 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1252 if (!proc) {
1253 ret = -ENOMEM;
1254 goto out;
1255 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001256 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001257 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001258 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1259 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001260 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1261 kvm->arch.model.ibc,
1262 kvm->arch.model.cpuid);
1263 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1264 kvm->arch.model.fac_list[0],
1265 kvm->arch.model.fac_list[1],
1266 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001267 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1268 ret = -EFAULT;
1269 kfree(proc);
1270out:
1271 return ret;
1272}
1273
1274static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1275{
1276 struct kvm_s390_vm_cpu_machine *mach;
1277 int ret = 0;
1278
1279 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1280 if (!mach) {
1281 ret = -ENOMEM;
1282 goto out;
1283 }
1284 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001285 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001286 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001287 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001288 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001289 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001290 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1291 kvm->arch.model.ibc,
1292 kvm->arch.model.cpuid);
1293 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1294 mach->fac_mask[0],
1295 mach->fac_mask[1],
1296 mach->fac_mask[2]);
1297 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1298 mach->fac_list[0],
1299 mach->fac_list[1],
1300 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001301 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1302 ret = -EFAULT;
1303 kfree(mach);
1304out:
1305 return ret;
1306}
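/*
 * The two getters above answer different questions: KVM_S390_VM_CPU_PROCESSOR
 * reports the model currently presented to the guest (kvm->arch.model),
 * while KVM_S390_VM_CPU_MACHINE reports host capabilities - fac_mask is
 * the facility mask KVM applies to guests, fac_list the host's raw STFLE
 * facility list.
 */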
1307
David Hildenbrand15c97052015-03-19 17:36:43 +01001308static int kvm_s390_get_processor_feat(struct kvm *kvm,
1309 struct kvm_device_attr *attr)
1310{
1311 struct kvm_s390_vm_cpu_feat data;
1312
1313 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1314 KVM_S390_VM_CPU_FEAT_NR_BITS);
1315 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1316 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001317 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1318 data.feat[0],
1319 data.feat[1],
1320 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001321 return 0;
1322}
1323
1324static int kvm_s390_get_machine_feat(struct kvm *kvm,
1325 struct kvm_device_attr *attr)
1326{
1327 struct kvm_s390_vm_cpu_feat data;
1328
1329 bitmap_copy((unsigned long *) data.feat,
1330 kvm_s390_available_cpu_feat,
1331 KVM_S390_VM_CPU_FEAT_NR_BITS);
1332 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1333 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001334 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1335 data.feat[0],
1336 data.feat[1],
1337 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001338 return 0;
1339}
1340
David Hildenbrand0a763c72016-05-18 16:03:47 +02001341static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1342 struct kvm_device_attr *attr)
1343{
1344 /*
1345 * Once we can actually configure subfunctions (kernel + hw support),
1346	 * we have to check whether they were already set by user space and,
1347	 * if so, copy them from kvm->arch.
1348 */
1349 return -ENXIO;
1350}
1351
1352static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1353 struct kvm_device_attr *attr)
1354{
1355 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1356 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1357 return -EFAULT;
1358 return 0;
1359}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001360static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1361{
1362 int ret = -ENXIO;
1363
1364 switch (attr->attr) {
1365 case KVM_S390_VM_CPU_PROCESSOR:
1366 ret = kvm_s390_get_processor(kvm, attr);
1367 break;
1368 case KVM_S390_VM_CPU_MACHINE:
1369 ret = kvm_s390_get_machine(kvm, attr);
1370 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001371 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1372 ret = kvm_s390_get_processor_feat(kvm, attr);
1373 break;
1374 case KVM_S390_VM_CPU_MACHINE_FEAT:
1375 ret = kvm_s390_get_machine_feat(kvm, attr);
1376 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001377 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1378 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1379 break;
1380 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1381 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1382 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001383 }
1384 return ret;
1385}
1386
Dominik Dingelf2061652014-04-09 13:13:00 +02001387static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1388{
1389 int ret;
1390
1391 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001392 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001393 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001394 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001395 case KVM_S390_VM_TOD:
1396 ret = kvm_s390_set_tod(kvm, attr);
1397 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001398 case KVM_S390_VM_CPU_MODEL:
1399 ret = kvm_s390_set_cpu_model(kvm, attr);
1400 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001401 case KVM_S390_VM_CRYPTO:
1402 ret = kvm_s390_vm_set_crypto(kvm, attr);
1403 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001404 case KVM_S390_VM_MIGRATION:
1405 ret = kvm_s390_vm_set_migration(kvm, attr);
1406 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001407 default:
1408 ret = -ENXIO;
1409 break;
1410 }
1411
1412 return ret;
1413}
1414
1415static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1416{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001417 int ret;
1418
1419 switch (attr->group) {
1420 case KVM_S390_VM_MEM_CTRL:
1421 ret = kvm_s390_get_mem_control(kvm, attr);
1422 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001423 case KVM_S390_VM_TOD:
1424 ret = kvm_s390_get_tod(kvm, attr);
1425 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001426 case KVM_S390_VM_CPU_MODEL:
1427 ret = kvm_s390_get_cpu_model(kvm, attr);
1428 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001429 case KVM_S390_VM_MIGRATION:
1430 ret = kvm_s390_vm_get_migration(kvm, attr);
1431 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001432 default:
1433 ret = -ENXIO;
1434 break;
1435 }
1436
1437 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001438}
1439
1440static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1441{
1442 int ret;
1443
1444 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001445 case KVM_S390_VM_MEM_CTRL:
1446 switch (attr->attr) {
1447 case KVM_S390_VM_MEM_ENABLE_CMMA:
1448 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001449 ret = sclp.has_cmma ? 0 : -ENXIO;
1450 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001451 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001452 ret = 0;
1453 break;
1454 default:
1455 ret = -ENXIO;
1456 break;
1457 }
1458 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001459 case KVM_S390_VM_TOD:
1460 switch (attr->attr) {
1461 case KVM_S390_VM_TOD_LOW:
1462 case KVM_S390_VM_TOD_HIGH:
1463 ret = 0;
1464 break;
1465 default:
1466 ret = -ENXIO;
1467 break;
1468 }
1469 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001470 case KVM_S390_VM_CPU_MODEL:
1471 switch (attr->attr) {
1472 case KVM_S390_VM_CPU_PROCESSOR:
1473 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001474 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1475 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001476 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001477 ret = 0;
1478 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001479 /* configuring subfunctions is not supported yet */
1480 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001481 default:
1482 ret = -ENXIO;
1483 break;
1484 }
1485 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001486 case KVM_S390_VM_CRYPTO:
1487 switch (attr->attr) {
1488 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1489 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1490 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1491 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1492 ret = 0;
1493 break;
1494 default:
1495 ret = -ENXIO;
1496 break;
1497 }
1498 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001499 case KVM_S390_VM_MIGRATION:
1500 ret = 0;
1501 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001502 default:
1503 ret = -ENXIO;
1504 break;
1505 }
1506
1507 return ret;
1508}
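/*
 * Illustrative note, not part of the kernel source: userspace typically
 * probes for an attribute with KVM_HAS_DEVICE_ATTR before using it, e.g.:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * A zero return from the probe means the attribute is implemented; the
 * handler above answers -ENXIO for everything it does not know.
 */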
1509
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001510static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1511{
1512 uint8_t *keys;
1513 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001514 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001515
1516 if (args->flags != 0)
1517 return -EINVAL;
1518
1519 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001520 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001521 return KVM_S390_GET_SKEYS_NONE;
1522
1523 /* Enforce sane limit on memory allocation */
1524 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1525 return -EINVAL;
1526
Michal Hocko752ade62017-05-08 15:57:27 -07001527 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001528 if (!keys)
1529 return -ENOMEM;
1530
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001531 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001532 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001533 for (i = 0; i < args->count; i++) {
1534 hva = gfn_to_hva(kvm, args->start_gfn + i);
1535 if (kvm_is_error_hva(hva)) {
1536 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001537 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001538 }
1539
David Hildenbrand154c8c12016-05-09 11:22:34 +02001540 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1541 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001542 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001543 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001544 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001545 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001546
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001547 if (!r) {
1548 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1549 sizeof(uint8_t) * args->count);
1550 if (r)
1551 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001552 }
1553
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001554 kvfree(keys);
1555 return r;
1556}
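/*
 * Illustrative sketch, not part of the kernel source: userspace reads
 * guest storage keys with the KVM_S390_GET_SKEYS VM ioctl ("vm_fd" is
 * assumed to be an open VM fd, error handling trimmed):
 *
 *	uint8_t keys[1024];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 1024,
 *		.skeydata_addr = (uint64_t)keys,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest is not using
 * storage keys and nothing was copied.
 */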
1557
1558static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1559{
1560 uint8_t *keys;
1561 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001562 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001563 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001564
1565 if (args->flags != 0)
1566 return -EINVAL;
1567
1568 /* Enforce sane limit on memory allocation */
1569 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1570 return -EINVAL;
1571
Michal Hocko752ade62017-05-08 15:57:27 -07001572 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001573 if (!keys)
1574 return -ENOMEM;
1575
1576 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1577 sizeof(uint8_t) * args->count);
1578 if (r) {
1579 r = -EFAULT;
1580 goto out;
1581 }
1582
1583 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001584 r = s390_enable_skey();
1585 if (r)
1586 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001587
Janosch Frankbd096f62018-07-18 13:40:22 +01001588 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001589 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001590 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001591 while (i < args->count) {
1592 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001593 hva = gfn_to_hva(kvm, args->start_gfn + i);
1594 if (kvm_is_error_hva(hva)) {
1595 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001596 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001597 }
1598
1599 /* Lowest order bit is reserved */
1600 if (keys[i] & 0x01) {
1601 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001602 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001603 }
1604
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001605 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001606 if (r) {
1607 r = fixup_user_fault(current, current->mm, hva,
1608 FAULT_FLAG_WRITE, &unlocked);
1609 if (r)
1610 break;
1611 }
1612 if (!r)
1613 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001614 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001615 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001616 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001617out:
1618 kvfree(keys);
1619 return r;
1620}
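/*
 * Unlike the get path, the set loop above retries the same index when
 * set_guest_storage_key() fails: fixup_user_fault() faults the page in
 * writably and the key is stored on the next pass; "i" only advances
 * once the key was actually set.
 */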
1621
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001622/*
1623 * Base address and length must be sent at the start of each block, so as
1624 * long as a run of clean values is shorter than two longs it is cheaper to
1625 * send the clean data than to start a new block.
1626 */
1627#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1628/* for consistency */
1629#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1630
1631/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001632 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1633 * address falls in a hole. In that case the index of one of the memslots
1634 * bordering the hole is returned.
1635 */
1636static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1637{
1638 int start = 0, end = slots->used_slots;
1639 int slot = atomic_read(&slots->lru_slot);
1640 struct kvm_memory_slot *memslots = slots->memslots;
1641
1642 if (gfn >= memslots[slot].base_gfn &&
1643 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1644 return slot;
1645
1646 while (start < end) {
1647 slot = start + (end - start) / 2;
1648
1649 if (gfn >= memslots[slot].base_gfn)
1650 end = slot;
1651 else
1652 start = slot + 1;
1653 }
1654
1655 if (gfn >= memslots[start].base_gfn &&
1656 gfn < memslots[start].base_gfn + memslots[start].npages) {
1657 atomic_set(&slots->lru_slot, start);
1658 }
1659
1660 return start;
1661}
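/*
 * This search relies on the generic KVM invariant that the memslots array
 * is sorted by base_gfn in descending order: it finds the used slot with
 * the highest base_gfn that is still <= gfn, i.e. the containing slot
 * when gfn is backed, or a slot bordering the hole otherwise.
 */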
1662
1663static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1664 u8 *res, unsigned long bufsize)
1665{
1666 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1667
1668 args->count = 0;
1669 while (args->count < bufsize) {
1670 hva = gfn_to_hva(kvm, cur_gfn);
1671 /*
1672 * We return an error if the first value was invalid, but we
1673 * return successfully if at least one value was copied.
1674 */
1675 if (kvm_is_error_hva(hva))
1676 return args->count ? 0 : -EFAULT;
1677 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1678 pgstev = 0;
1679 res[args->count++] = (pgstev >> 24) & 0x43;
1680 cur_gfn++;
1681 }
1682
1683 return 0;
1684}
1685
1686static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1687 unsigned long cur_gfn)
1688{
1689 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1690 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1691 unsigned long ofs = cur_gfn - ms->base_gfn;
1692
1693 if (ms->base_gfn + ms->npages <= cur_gfn) {
1694 slotidx--;
1695 /* If we are above the highest slot, wrap around */
1696 if (slotidx < 0)
1697 slotidx = slots->used_slots - 1;
1698
1699 ms = slots->memslots + slotidx;
1700 ofs = 0;
1701 }
1702 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1703 while ((slotidx > 0) && (ofs >= ms->npages)) {
1704 slotidx--;
1705 ms = slots->memslots + slotidx;
1706 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1707 }
1708 return ms->base_gfn + ofs;
1709}
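/*
 * Because memslots are sorted by base_gfn in descending order,
 * decrementing slotidx above moves to the next-higher guest frame range;
 * the scan walks the per-slot "second" dirty bitmaps upward in gfn order
 * and wraps to the lowest range once cur_gfn lies above the highest slot.
 */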
1710
1711static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1712 u8 *res, unsigned long bufsize)
1713{
1714 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1715 struct kvm_memslots *slots = kvm_memslots(kvm);
1716 struct kvm_memory_slot *ms;
1717
1718 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1719 ms = gfn_to_memslot(kvm, cur_gfn);
1720 args->count = 0;
1721 args->start_gfn = cur_gfn;
1722 if (!ms)
1723 return 0;
1724 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1725 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
1726
1727 while (args->count < bufsize) {
1728 hva = gfn_to_hva(kvm, cur_gfn);
1729 if (kvm_is_error_hva(hva))
1730 return 0;
1731 /* Decrement only if we actually flipped the bit to 0 */
1732 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
1733 atomic64_dec(&kvm->arch.cmma_dirty_pages);
1734 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1735 pgstev = 0;
1736 /* Save the value */
1737 res[args->count++] = (pgstev >> 24) & 0x43;
1738 /* If the next bit is too far away, stop. */
1739 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
1740 return 0;
1741 /* If we reached the previous "next", find the next one */
1742 if (cur_gfn == next_gfn)
1743 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1744 /* Reached the end of memory or of the buffer, stop */
1745 if ((next_gfn >= mem_end) ||
1746 (next_gfn - args->start_gfn >= bufsize))
1747 return 0;
1748 cur_gfn++;
1749 /* Reached the end of the current memslot, take the next one. */
1750 if (cur_gfn - ms->base_gfn >= ms->npages) {
1751 ms = gfn_to_memslot(kvm, cur_gfn);
1752 if (!ms)
1753 return 0;
1754 }
1755 }
1756 return 0;
1757}
1758
1759/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001760 * This function searches for the next page with dirty CMMA attributes, and
1761 * saves the attributes in the buffer up to either the end of the buffer or
1762 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1763 * no trailing clean bytes are saved.
1764 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1765 * output buffer will indicate 0 as length.
1766 */
1767static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1768 struct kvm_s390_cmma_log *args)
1769{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001770 unsigned long bufsize;
1771 int srcu_idx, peek, ret;
1772 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001773
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001774 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001775 return -ENXIO;
1776 /* Invalid/unsupported flags were specified */
1777 if (args->flags & ~KVM_S390_CMMA_PEEK)
1778 return -EINVAL;
1779	/* A plain query (no PEEK flag) is only valid while in migration mode */
1780 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001781 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001782 return -EINVAL;
1783	/* Nothing to report if the buffer is empty or CMMA was never used */
1784 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001785 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001786 memset(args, 0, sizeof(*args));
1787 return 0;
1788 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001789 /* We are not peeking, and there are no dirty pages */
1790 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
1791 memset(args, 0, sizeof(*args));
1792 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001793 }
1794
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001795 values = vmalloc(bufsize);
1796 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001797 return -ENOMEM;
1798
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001799 down_read(&kvm->mm->mmap_sem);
1800 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001801 if (peek)
1802 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
1803 else
1804 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001805 srcu_read_unlock(&kvm->srcu, srcu_idx);
1806 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001807
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001808 if (kvm->arch.migration_mode)
1809 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
1810 else
1811 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001812
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001813 if (copy_to_user((void __user *)args->values, values, args->count))
1814 ret = -EFAULT;
1815
1816 vfree(values);
1817 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001818}
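/*
 * Illustrative sketch, not part of the kernel source: during migration,
 * userspace drains the dirty CMMA values with repeated
 * KVM_S390_GET_CMMA_BITS calls, roughly like this (buffer size
 * illustrative, error handling trimmed):
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = sizeof(buf),
 *		.flags = 0,
 *		.values = (uint64_t)buf,
 *	};
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		... transmit log.count values starting at log.start_gfn ...
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 *
 * Setting KVM_S390_CMMA_PEEK in flags instead reads the values at the
 * given position without consuming dirty bits.
 */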
1819
1820/*
1821 * This function sets the CMMA attributes for the given pages. If the input
1822 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001823 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001824 */
1825static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1826 const struct kvm_s390_cmma_log *args)
1827{
1828 unsigned long hva, mask, pgstev, i;
1829 uint8_t *bits;
1830 int srcu_idx, r = 0;
1831
1832 mask = args->mask;
1833
1834 if (!kvm->arch.use_cmma)
1835 return -ENXIO;
1836 /* invalid/unsupported flags */
1837 if (args->flags != 0)
1838 return -EINVAL;
1839 /* Enforce sane limit on memory allocation */
1840 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1841 return -EINVAL;
1842 /* Nothing to do */
1843 if (args->count == 0)
1844 return 0;
1845
Kees Cook42bc47b2018-06-12 14:27:11 -07001846 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001847 if (!bits)
1848 return -ENOMEM;
1849
1850 r = copy_from_user(bits, (void __user *)args->values, args->count);
1851 if (r) {
1852 r = -EFAULT;
1853 goto out;
1854 }
1855
1856 down_read(&kvm->mm->mmap_sem);
1857 srcu_idx = srcu_read_lock(&kvm->srcu);
1858 for (i = 0; i < args->count; i++) {
1859 hva = gfn_to_hva(kvm, args->start_gfn + i);
1860 if (kvm_is_error_hva(hva)) {
1861 r = -EFAULT;
1862 break;
1863 }
1864
1865 pgstev = bits[i];
1866 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001867 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001868 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1869 }
1870 srcu_read_unlock(&kvm->srcu, srcu_idx);
1871 up_read(&kvm->mm->mmap_sem);
1872
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001873 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001874 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001875 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001876 up_write(&kvm->mm->mmap_sem);
1877 }
1878out:
1879 vfree(bits);
1880 return r;
1881}
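/*
 * Only the usage-state and NODAT bits of the caller's mask survive the
 * masking above; each input byte is shifted into bits 24-31 of the PGSTE
 * value before set_pgste_bits() applies it.
 */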
1882
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001883long kvm_arch_vm_ioctl(struct file *filp,
1884 unsigned int ioctl, unsigned long arg)
1885{
1886 struct kvm *kvm = filp->private_data;
1887 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001888 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001889 int r;
1890
1891 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001892 case KVM_S390_INTERRUPT: {
1893 struct kvm_s390_interrupt s390int;
1894
1895 r = -EFAULT;
1896 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1897 break;
1898 r = kvm_s390_inject_vm(kvm, &s390int);
1899 break;
1900 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001901 case KVM_ENABLE_CAP: {
1902 struct kvm_enable_cap cap;
1903 r = -EFAULT;
1904 if (copy_from_user(&cap, argp, sizeof(cap)))
1905 break;
1906 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1907 break;
1908 }
Cornelia Huck84223592013-07-15 13:36:01 +02001909 case KVM_CREATE_IRQCHIP: {
1910 struct kvm_irq_routing_entry routing;
1911
1912 r = -EINVAL;
1913 if (kvm->arch.use_irqchip) {
1914 /* Set up dummy routing. */
1915 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001916 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001917 }
1918 break;
1919 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001920 case KVM_SET_DEVICE_ATTR: {
1921 r = -EFAULT;
1922 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1923 break;
1924 r = kvm_s390_vm_set_attr(kvm, &attr);
1925 break;
1926 }
1927 case KVM_GET_DEVICE_ATTR: {
1928 r = -EFAULT;
1929 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1930 break;
1931 r = kvm_s390_vm_get_attr(kvm, &attr);
1932 break;
1933 }
1934 case KVM_HAS_DEVICE_ATTR: {
1935 r = -EFAULT;
1936 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1937 break;
1938 r = kvm_s390_vm_has_attr(kvm, &attr);
1939 break;
1940 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001941 case KVM_S390_GET_SKEYS: {
1942 struct kvm_s390_skeys args;
1943
1944 r = -EFAULT;
1945 if (copy_from_user(&args, argp,
1946 sizeof(struct kvm_s390_skeys)))
1947 break;
1948 r = kvm_s390_get_skeys(kvm, &args);
1949 break;
1950 }
1951 case KVM_S390_SET_SKEYS: {
1952 struct kvm_s390_skeys args;
1953
1954 r = -EFAULT;
1955 if (copy_from_user(&args, argp,
1956 sizeof(struct kvm_s390_skeys)))
1957 break;
1958 r = kvm_s390_set_skeys(kvm, &args);
1959 break;
1960 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001961 case KVM_S390_GET_CMMA_BITS: {
1962 struct kvm_s390_cmma_log args;
1963
1964 r = -EFAULT;
1965 if (copy_from_user(&args, argp, sizeof(args)))
1966 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001967 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001968 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001969 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001970 if (!r) {
1971 r = copy_to_user(argp, &args, sizeof(args));
1972 if (r)
1973 r = -EFAULT;
1974 }
1975 break;
1976 }
1977 case KVM_S390_SET_CMMA_BITS: {
1978 struct kvm_s390_cmma_log args;
1979
1980 r = -EFAULT;
1981 if (copy_from_user(&args, argp, sizeof(args)))
1982 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001983 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001984 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001985 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001986 break;
1987 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001988 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001989 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001990 }
1991
1992 return r;
1993}
1994
Tony Krowiak45c9b472015-01-13 11:33:26 -05001995static int kvm_s390_query_ap_config(u8 *config)
1996{
1997 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001998 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001999
Christian Borntraeger86044c82015-02-26 13:53:47 +01002000 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05002001 asm volatile(
2002 "lgr 0,%1\n"
2003 "lgr 2,%2\n"
2004 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01002005 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05002006 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01002007 "1:\n"
2008 EX_TABLE(0b, 1b)
2009 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05002010 : "r" (fcn_code), "r" (config)
2011 : "cc", "0", "2", "memory"
2012 );
2013
2014 return cc;
2015}
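/*
 * In the inline assembly above, general register 0 holds the PQAP
 * function code 0x04000000 (QCI), general register 2 the address of the
 * 128-byte response buffer, and .long 0xb2af0000 is the PQAP opcode
 * itself. The ipm/srl pair extracts the condition code; if PQAP raises a
 * program exception the EX_TABLE entry skips that extraction, so the
 * function returns with cc == 0 and a zeroed buffer.
 */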
2016
2017static int kvm_s390_apxa_installed(void)
2018{
2019 u8 config[128];
2020 int cc;
2021
Heiko Carstensa6aacc32015-11-24 14:28:12 +01002022 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05002023 cc = kvm_s390_query_ap_config(config);
2024
2025 if (cc)
2026			pr_err("PQAP(QCI) failed with cc=%d\n", cc);
2027 else
2028 return config[0] & 0x40;
2029 }
2030
2031 return 0;
2032}
2033
2034static void kvm_s390_set_crycb_format(struct kvm *kvm)
2035{
2036 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2037
2038 if (kvm_s390_apxa_installed())
2039 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2040 else
2041 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2042}
2043
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002044static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002045{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002046 struct cpuid cpuid;
2047
2048 get_cpu_id(&cpuid);
2049 cpuid.version = 0xff;
2050 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002051}
2052
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002053static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002054{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002055 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002056 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002057
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002058 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002059 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002060
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002061 /* Enable AES/DEA protected key functions by default */
2062 kvm->arch.crypto.aes_kw = 1;
2063 kvm->arch.crypto.dea_kw = 1;
2064 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2065 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2066 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2067 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002068}
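/*
 * The AES and DEA wrapping key masks are filled with fresh random bytes
 * for every VM, so protected keys wrapped inside one guest are not usable
 * in any other guest.
 */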
2069
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002070static void sca_dispose(struct kvm *kvm)
2071{
2072 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002073 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002074 else
2075 free_page((unsigned long)(kvm->arch.sca));
2076 kvm->arch.sca = NULL;
2077}
2078
Carsten Ottee08b9632012-01-04 10:25:20 +01002079int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002080{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002081 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002082 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002083 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002084 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002085
Carsten Ottee08b9632012-01-04 10:25:20 +01002086 rc = -EINVAL;
2087#ifdef CONFIG_KVM_S390_UCONTROL
2088 if (type & ~KVM_VM_S390_UCONTROL)
2089 goto out_err;
2090 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2091 goto out_err;
2092#else
2093 if (type)
2094 goto out_err;
2095#endif
2096
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002097 rc = s390_enable_sie();
2098 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002099 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002100
Carsten Otteb2904112011-10-18 12:27:13 +02002101 rc = -ENOMEM;
2102
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002103 if (!sclp.has_64bscao)
2104 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002105 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002106 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002107 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002108 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002109 goto out_err;
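	/*
	 * Each new VM places its basic SCA 16 bytes further into the
	 * allocated page than the previous one, wrapping before the block
	 * would cross the page boundary - presumably to spread the SCAs of
	 * different VMs across distinct cache lines.
	 */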
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002110 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002111 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002112 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002113 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002114 kvm->arch.sca = (struct bsca_block *)
2115 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002116 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002117
2118 sprintf(debug_name, "kvm-%u", current->pid);
2119
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002120 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002121 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002122 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002123
Michael Mueller19114be2017-05-30 14:26:02 +02002124 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002125 kvm->arch.sie_page2 =
2126 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2127 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002128 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002129
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002130 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002131
2132 for (i = 0; i < kvm_s390_fac_size(); i++) {
2133 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2134 (kvm_s390_fac_base[i] |
2135 kvm_s390_fac_ext[i]);
2136 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2137 kvm_s390_fac_base[i];
2138 }
Michael Mueller981467c2015-02-24 13:51:04 +01002139
David Hildenbrand19352222017-08-29 16:31:08 +02002140 /* we are always in czam mode - even on pre z14 machines */
2141 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2142 set_kvm_facility(kvm->arch.model.fac_list, 138);
2143 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002144 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2145 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002146 if (MACHINE_HAS_TLB_GUEST) {
2147 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2148 set_kvm_facility(kvm->arch.model.fac_list, 147);
2149 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002150
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002151 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002152 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002153
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002154 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002155
Fei Li51978392017-02-17 17:06:26 +08002156 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002157 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002158 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2159 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002160 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002161 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002162
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002163 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002164 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002165
Carsten Ottee08b9632012-01-04 10:25:20 +01002166 if (type & KVM_VM_S390_UCONTROL) {
2167 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002168 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002169 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002170 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002171 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002172 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002173 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002174 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002175 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002176 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002177 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002178 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002179 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002180 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002181
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002182 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002183 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002184 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002185 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002186 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002187 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002188
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002189 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002190out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002191 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002192 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002193 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002194 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002195 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002196}
2197
Luiz Capitulino235539b2016-09-07 14:47:23 -04002198bool kvm_arch_has_vcpu_debugfs(void)
2199{
2200 return false;
2201}
2202
2203int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2204{
2205 return 0;
2206}
2207
Christian Borntraegerd329c032008-11-26 14:50:27 +01002208void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2209{
2210 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002211 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002212 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002213 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002214 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002215 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002216
2217 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002218 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002219
Dominik Dingele6db1d62015-05-07 15:41:57 +02002220 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002221 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002222 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002223
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002224 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002225 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002226}
2227
2228static void kvm_free_vcpus(struct kvm *kvm)
2229{
2230 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002231 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002232
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002233 kvm_for_each_vcpu(i, vcpu, kvm)
2234 kvm_arch_vcpu_destroy(vcpu);
2235
2236 mutex_lock(&kvm->lock);
2237 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2238 kvm->vcpus[i] = NULL;
2239
2240 atomic_set(&kvm->online_vcpus, 0);
2241 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002242}
2243
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002244void kvm_arch_destroy_vm(struct kvm *kvm)
2245{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002246 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002247 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002248 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002249 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002250 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002251 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002252 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002253 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002254 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002255 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002256 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002257}
2258
2259/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002260static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2261{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002262 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002263 if (!vcpu->arch.gmap)
2264 return -ENOMEM;
2265 vcpu->arch.gmap->private = vcpu->kvm;
2266
2267 return 0;
2268}
2269
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002270static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2271{
David Hildenbranda6940672016-08-08 22:39:32 +02002272 if (!kvm_s390_use_sca_entries())
2273 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002274 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002275 if (vcpu->kvm->arch.use_esca) {
2276 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002277
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002278 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002279 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002280 } else {
2281 struct bsca_block *sca = vcpu->kvm->arch.sca;
2282
2283 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002284 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002285 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002286 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002287}
2288
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002289static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002290{
David Hildenbranda6940672016-08-08 22:39:32 +02002291 if (!kvm_s390_use_sca_entries()) {
2292 struct bsca_block *sca = vcpu->kvm->arch.sca;
2293
2294 /* we still need the basic sca for the ipte control */
2295 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2296 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002297 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002298 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002299 read_lock(&vcpu->kvm->arch.sca_lock);
2300 if (vcpu->kvm->arch.use_esca) {
2301 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002302
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002303 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002304 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2305 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002306 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002307 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002308 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002309 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002310
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002311 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002312 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2313 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002314 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002315 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002316 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002317}
2318
2319/* Basic SCA to Extended SCA data copy routines */
2320static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2321{
2322 d->sda = s->sda;
2323 d->sigp_ctrl.c = s->sigp_ctrl.c;
2324 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2325}
2326
2327static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2328{
2329 int i;
2330
2331 d->ipte_control = s->ipte_control;
2332 d->mcn[0] = s->mcn;
2333 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2334 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2335}
2336
2337static int sca_switch_to_extended(struct kvm *kvm)
2338{
2339 struct bsca_block *old_sca = kvm->arch.sca;
2340 struct esca_block *new_sca;
2341 struct kvm_vcpu *vcpu;
2342 unsigned int vcpu_idx;
2343 u32 scaol, scaoh;
2344
2345 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2346 if (!new_sca)
2347 return -ENOMEM;
2348
2349 scaoh = (u32)((u64)(new_sca) >> 32);
2350 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2351
2352 kvm_s390_vcpu_block_all(kvm);
2353 write_lock(&kvm->arch.sca_lock);
2354
2355 sca_copy_b_to_e(new_sca, old_sca);
2356
2357 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2358 vcpu->arch.sie_block->scaoh = scaoh;
2359 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002360 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002361 }
2362 kvm->arch.sca = new_sca;
2363 kvm->arch.use_esca = 1;
2364
2365 write_unlock(&kvm->arch.sca_lock);
2366 kvm_s390_vcpu_unblock_all(kvm);
2367
2368 free_page((unsigned long)old_sca);
2369
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002370 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2371 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002372 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002373}
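/*
 * The BSCA -> ESCA switch above runs with all vCPUs blocked: a zeroed
 * extended SCA is allocated, the basic entries are copied over, every
 * vCPU's scaoh/scaol pointers are rewritten and ECB2_ESCA is set so the
 * SIE uses the extended format, and only then is the old block freed.
 */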
2374
2375static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2376{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002377 int rc;
2378
David Hildenbranda6940672016-08-08 22:39:32 +02002379 if (!kvm_s390_use_sca_entries()) {
2380 if (id < KVM_MAX_VCPUS)
2381 return true;
2382 return false;
2383 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002384 if (id < KVM_S390_BSCA_CPU_SLOTS)
2385 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002386 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002387 return false;
2388
2389 mutex_lock(&kvm->lock);
2390 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2391 mutex_unlock(&kvm->lock);
2392
2393 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002394}
2395
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2397{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002398 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2399 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002400 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2401 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002402 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002403 KVM_SYNC_CRS |
2404 KVM_SYNC_ARCH0 |
2405 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002406 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002407 if (test_kvm_facility(vcpu->kvm, 64))
2408 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002409 if (test_kvm_facility(vcpu->kvm, 82))
2410 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002411 if (test_kvm_facility(vcpu->kvm, 133))
2412 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002413 if (test_kvm_facility(vcpu->kvm, 156))
2414 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002415 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2416 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2417 */
2418 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002419 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002420 else
2421 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002422
2423 if (kvm_is_ucontrol(vcpu->kvm))
2424 return __kvm_ucontrol_vcpu_init(vcpu);
2425
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002426 return 0;
2427}
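/*
 * The kvm_valid_regs bits set above tell userspace which fields of the
 * kvm_run synchronization area are valid for this vCPU; the
 * facility-gated bits (RICCB, BPBC, GSCB, ETOKEN) are only offered when
 * the corresponding facility is part of the VM's CPU model, and either
 * VRS or FPRS is advertised depending on host vector support.
 */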
2428
David Hildenbranddb0758b2016-02-15 09:42:25 +01002429/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2430static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2431{
2432 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002433 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002434 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002435 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002436}
2437
2438/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2439static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2440{
2441 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002442 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002443 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2444 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002445 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002446}
2447
2448/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2449static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2450{
2451 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2452 vcpu->arch.cputm_enabled = true;
2453 __start_cpu_timer_accounting(vcpu);
2454}
2455
2456/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2457static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2458{
2459 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2460 __stop_cpu_timer_accounting(vcpu);
2461 vcpu->arch.cputm_enabled = false;
2462}
2463
2464static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2465{
2466 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2467 __enable_cpu_timer_accounting(vcpu);
2468 preempt_enable();
2469}
2470
2471static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2472{
2473 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2474 __disable_cpu_timer_accounting(vcpu);
2475 preempt_enable();
2476}
2477
David Hildenbrand4287f242016-02-15 09:40:12 +01002478/* set the cpu timer - may only be called from the VCPU thread itself */
2479void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2480{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002481 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002482 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002483 if (vcpu->arch.cputm_enabled)
2484 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002485 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002486 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002487 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002488}
2489
David Hildenbranddb0758b2016-02-15 09:42:25 +01002490/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002491__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2492{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002493 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002494 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002495
2496 if (unlikely(!vcpu->arch.cputm_enabled))
2497 return vcpu->arch.sie_block->cputm;
2498
David Hildenbrand9c23a132016-02-17 21:53:33 +01002499 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2500 do {
2501 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2502 /*
2503 * If the writer would ever execute a read in the critical
2504 * section, e.g. in irq context, we have a deadlock.
2505 */
2506 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2507 value = vcpu->arch.sie_block->cputm;
2508 /* if cputm_start is 0, accounting is being started/stopped */
2509 if (likely(vcpu->arch.cputm_start))
2510 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2511 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2512 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002513 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002514}
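
/*
 * Minimal user-space sketch of the seqcount retry pattern used by
 * kvm_s390_get_cpu_timer() above: readers retry until they observe an
 * even, unchanged sequence number. The demo_* names are hypothetical;
 * the kernel's seqcount primitives additionally supply the memory
 * barriers that this simplified version omits.
 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_timer_state {
	atomic_uint seq;	/* odd while a writer is in its critical section */
	uint64_t cputm;
	uint64_t cputm_start;	/* 0 while accounting is stopped */
};

static uint64_t demo_get_timer(struct demo_timer_state *s, uint64_t tod_now)
{
	unsigned int seq;
	uint64_t value;

	do {
		seq = atomic_load(&s->seq);
		value = s->cputm;
		if (s->cputm_start)	/* accounting currently running? */
			value -= tod_now - s->cputm_start;
		/* retry if a write was in flight or finished meanwhile */
	} while ((seq & 1) || atomic_load(&s->seq) != seq);
	return value;
}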
2515
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2517{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002518
David Hildenbrand37d9df92015-03-11 16:47:33 +01002519 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002520 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002521 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002522 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002523 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002524}
2525
2526void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2527{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002528 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002529 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002530 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002531 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002532 vcpu->arch.enabled_gmap = gmap_get_enabled();
2533 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002534
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002535}
2536
2537static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2538{
 2539	/* this equals the initial cpu reset in the POP (Principles of Operation), but we don't switch to ESA */
2540 vcpu->arch.sie_block->gpsw.mask = 0UL;
2541 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002542 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002543 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002544 vcpu->arch.sie_block->ckc = 0UL;
2545 vcpu->arch.sie_block->todpr = 0;
2546 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002547 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2548 CR0_INTERRUPT_KEY_SUBMASK |
2549 CR0_MEASUREMENT_ALERT_SUBMASK;
2550 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2551 CR14_UNUSED_33 |
2552 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002553 /* make sure the new fpc will be lazily loaded */
2554 save_fpu_regs();
2555 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002556 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002557 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002558 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002559 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2560 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002561 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2562 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002563 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002564}
2565
Dominik Dingel31928aa2014-12-04 15:47:07 +01002566void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002567{
Jason J. Herne72f25022014-11-25 09:46:02 -05002568 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002569 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002570 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002571 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002572 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002573 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002574 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002575 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002576 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002577 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002578 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2579 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002580 /* make vcpu_load load the right gmap on the first trigger */
2581 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002582}
2583
Tony Krowiak5102ee82014-06-27 14:46:01 -04002584static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2585{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002586 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002587 return;
2588
Tony Krowiaka374e892014-09-03 10:13:53 +02002589 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2590
2591 if (vcpu->kvm->arch.crypto.aes_kw)
2592 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2593 if (vcpu->kvm->arch.crypto.dea_kw)
2594 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2595
Tony Krowiak5102ee82014-06-27 14:46:01 -04002596 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2597}
2598
Dominik Dingelb31605c2014-03-25 13:47:11 +01002599void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2600{
2601 free_page(vcpu->arch.sie_block->cbrlo);
2602 vcpu->arch.sie_block->cbrlo = 0;
2603}
2604
2605int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2606{
2607 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2608 if (!vcpu->arch.sie_block->cbrlo)
2609 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002610 return 0;
2611}
2612
Michael Mueller91520f12015-02-27 14:32:11 +01002613static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2614{
2615 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2616
Michael Mueller91520f12015-02-27 14:32:11 +01002617 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002618 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002619 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002620}
2621
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002622int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2623{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002624 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002625
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002626 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2627 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002628 CPUSTAT_STOPPED);
2629
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002630 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002631 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002632 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002633 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002634
Michael Mueller91520f12015-02-27 14:32:11 +01002635 kvm_s390_vcpu_setup_model(vcpu);
2636
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002637 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2638 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002639 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002640 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002641 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002642 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002643 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002644
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002645 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002646 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002647 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002648 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2649 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002650 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002651 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002652 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002653 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002654 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002655 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002656 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002657 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002658 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002659 vcpu->arch.sie_block->eca |= ECA_VX;
2660 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002661 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002662 if (test_kvm_facility(vcpu->kvm, 139))
2663 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002664 if (test_kvm_facility(vcpu->kvm, 156))
2665 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002666 if (vcpu->arch.sie_block->gd) {
2667 vcpu->arch.sie_block->eca |= ECA_AIV;
2668 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2669 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2670 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002671 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2672 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002673 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002674
2675 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002676 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05002677 else
2678 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002679
Dominik Dingele6db1d62015-05-07 15:41:57 +02002680 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002681 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2682 if (rc)
2683 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002684 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002685 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002686 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002687
Tony Krowiak5102ee82014-06-27 14:46:01 -04002688 kvm_s390_vcpu_crypto_setup(vcpu);
2689
Dominik Dingelb31605c2014-03-25 13:47:11 +01002690 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002691}
2692
2693struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2694 unsigned int id)
2695{
Carsten Otte4d475552011-10-18 12:27:12 +02002696 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002697 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002698 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002699
David Hildenbrand42158252015-10-12 12:57:22 +02002700 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002701 goto out;
2702
2703 rc = -ENOMEM;
2704
Michael Muellerb110fea2013-06-12 13:54:54 +02002705 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002706 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002707 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002708
QingFeng Haoda72ca42017-06-07 11:41:19 +02002709 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002710 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2711 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002712 goto out_free_cpu;
2713
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002714 vcpu->arch.sie_block = &sie_page->sie_block;
2715 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2716
David Hildenbrandefed1102015-04-16 12:32:41 +02002717 /* the real guest size will always be smaller than msl */
2718 vcpu->arch.sie_block->mso = 0;
2719 vcpu->arch.sie_block->msl = sclp.hamax;
2720
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002721 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002722 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002723 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
Michael Mueller4b9f9522017-06-23 13:51:25 +02002724 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2725 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002726 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002727
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002728 rc = kvm_vcpu_init(vcpu, kvm, id);
2729 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002730 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002731 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002732 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002733 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002734
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002735 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002736out_free_sie_block:
2737 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002738out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002739 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002740out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002741 return ERR_PTR(rc);
2742}
2743
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002744int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2745{
David Hildenbrand9a022062014-08-05 17:40:47 +02002746 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002747}
2748
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002749bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2750{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002751 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002752}
2753
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002754void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002755{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002756 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002757 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002758}
2759
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002760void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002761{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002762 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002763}
2764
Christian Borntraeger8e236542015-04-09 13:49:04 +02002765static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2766{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002767 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002768 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002769}
2770
2771static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2772{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002773 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002774}
2775
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002776/*
2777 * Kick a guest cpu out of SIE and wait until SIE is not running.
2778 * If the CPU is not running (e.g. waiting as idle) the function will
 2779 * return immediately.
 */
2780void exit_sie(struct kvm_vcpu *vcpu)
2781{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002782 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002783 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2784 cpu_relax();
2785}
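
/*
 * Sketch (assuming C11 atomics, not kernel code) of the kick-and-wait
 * idea in exit_sie() above: raise a stop request that forces the vcpu
 * out of SIE, then spin until it has actually left guest context.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_vcpu {
	atomic_bool stop_requested;	/* analogue of CPUSTAT_STOP_INT */
	atomic_bool in_guest;		/* analogue of PROG_IN_SIE */
};

static void demo_exit_guest(struct demo_vcpu *v)
{
	atomic_store(&v->stop_requested, true);	/* kick the vcpu */
	while (atomic_load(&v->in_guest))
		;	/* the kernel uses cpu_relax() while spinning */
}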
2786
Christian Borntraeger8e236542015-04-09 13:49:04 +02002787/* Kick a guest cpu out of SIE to process a request synchronously */
2788void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002789{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002790 kvm_make_request(req, vcpu);
2791 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002792}
2793
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002794static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2795 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002796{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002797 struct kvm *kvm = gmap->private;
2798 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002799 unsigned long prefix;
2800 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002801
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002802 if (gmap_is_shadow(gmap))
2803 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002804 if (start >= 1UL << 31)
2805 /* We are only interested in prefix pages */
2806 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002807 kvm_for_each_vcpu(i, vcpu, kvm) {
2808 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002809 prefix = kvm_s390_get_prefix(vcpu);
2810 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2811 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2812 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002813 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002814 }
2815 }
2816}
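
/*
 * Sketch of the interval test kvm_gmap_notifier() applies above: a vcpu
 * is only kicked if the invalidated range [start, end] overlaps its
 * two-page prefix area. DEMO_PAGE_SIZE is a hypothetical stand-in for
 * PAGE_SIZE.
 */
#include <stdbool.h>

#define DEMO_PAGE_SIZE 4096UL

static bool demo_hits_prefix(unsigned long start, unsigned long end,
			     unsigned long prefix)
{
	return prefix <= end && start <= prefix + 2 * DEMO_PAGE_SIZE - 1;
}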
2817
Christoffer Dallb6d33832012-03-08 16:44:24 -05002818int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2819{
2820 /* kvm common code refers to this, but never calls it */
2821 BUG();
2822 return 0;
2823}
2824
Carsten Otte14eebd92012-05-15 14:15:26 +02002825static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2826 struct kvm_one_reg *reg)
2827{
2828 int r = -EINVAL;
2829
2830 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002831 case KVM_REG_S390_TODPR:
2832 r = put_user(vcpu->arch.sie_block->todpr,
2833 (u32 __user *)reg->addr);
2834 break;
2835 case KVM_REG_S390_EPOCHDIFF:
2836 r = put_user(vcpu->arch.sie_block->epoch,
2837 (u64 __user *)reg->addr);
2838 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002839 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002840 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002841 (u64 __user *)reg->addr);
2842 break;
2843 case KVM_REG_S390_CLOCK_COMP:
2844 r = put_user(vcpu->arch.sie_block->ckc,
2845 (u64 __user *)reg->addr);
2846 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002847 case KVM_REG_S390_PFTOKEN:
2848 r = put_user(vcpu->arch.pfault_token,
2849 (u64 __user *)reg->addr);
2850 break;
2851 case KVM_REG_S390_PFCOMPARE:
2852 r = put_user(vcpu->arch.pfault_compare,
2853 (u64 __user *)reg->addr);
2854 break;
2855 case KVM_REG_S390_PFSELECT:
2856 r = put_user(vcpu->arch.pfault_select,
2857 (u64 __user *)reg->addr);
2858 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002859 case KVM_REG_S390_PP:
2860 r = put_user(vcpu->arch.sie_block->pp,
2861 (u64 __user *)reg->addr);
2862 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002863 case KVM_REG_S390_GBEA:
2864 r = put_user(vcpu->arch.sie_block->gbea,
2865 (u64 __user *)reg->addr);
2866 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002867 default:
2868 break;
2869 }
2870
2871 return r;
2872}
2873
2874static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2875 struct kvm_one_reg *reg)
2876{
2877 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002878 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002879
2880 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002881 case KVM_REG_S390_TODPR:
2882 r = get_user(vcpu->arch.sie_block->todpr,
2883 (u32 __user *)reg->addr);
2884 break;
2885 case KVM_REG_S390_EPOCHDIFF:
2886 r = get_user(vcpu->arch.sie_block->epoch,
2887 (u64 __user *)reg->addr);
2888 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002889 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002890 r = get_user(val, (u64 __user *)reg->addr);
2891 if (!r)
2892 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002893 break;
2894 case KVM_REG_S390_CLOCK_COMP:
2895 r = get_user(vcpu->arch.sie_block->ckc,
2896 (u64 __user *)reg->addr);
2897 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002898 case KVM_REG_S390_PFTOKEN:
2899 r = get_user(vcpu->arch.pfault_token,
2900 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002901 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2902 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002903 break;
2904 case KVM_REG_S390_PFCOMPARE:
2905 r = get_user(vcpu->arch.pfault_compare,
2906 (u64 __user *)reg->addr);
2907 break;
2908 case KVM_REG_S390_PFSELECT:
2909 r = get_user(vcpu->arch.pfault_select,
2910 (u64 __user *)reg->addr);
2911 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002912 case KVM_REG_S390_PP:
2913 r = get_user(vcpu->arch.sie_block->pp,
2914 (u64 __user *)reg->addr);
2915 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002916 case KVM_REG_S390_GBEA:
2917 r = get_user(vcpu->arch.sie_block->gbea,
2918 (u64 __user *)reg->addr);
2919 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002920 default:
2921 break;
2922 }
2923
2924 return r;
2925}
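
/*
 * User-space sketch: the two handlers above back the KVM_GET_ONE_REG
 * and KVM_SET_ONE_REG vcpu ioctls. vcpu_fd is assumed to be an open
 * vcpu file descriptor on an s390 host; error handling is trimmed.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t demo_read_cpu_timer(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)&val,	/* put_user() target */
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return val;
}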
Christoffer Dallb6d33832012-03-08 16:44:24 -05002926
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002927static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2928{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002929 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002930 return 0;
2931}
2932
2933int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2934{
Christoffer Dall875656f2017-12-04 21:35:27 +01002935 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002936 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01002937 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002938 return 0;
2939}
2940
2941int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2942{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002943 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002944 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002945 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002946 return 0;
2947}
2948
2949int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2950 struct kvm_sregs *sregs)
2951{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01002952 vcpu_load(vcpu);
2953
Christian Borntraeger59674c12012-01-11 11:20:33 +01002954 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002955 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01002956
2957 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002958 return 0;
2959}
2960
2961int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2962 struct kvm_sregs *sregs)
2963{
Christoffer Dallbcdec412017-12-04 21:35:28 +01002964 vcpu_load(vcpu);
2965
Christian Borntraeger59674c12012-01-11 11:20:33 +01002966 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002967 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01002968
2969 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002970 return 0;
2971}
2972
2973int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2974{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01002975 int ret = 0;
2976
2977 vcpu_load(vcpu);
2978
2979 if (test_fp_ctl(fpu->fpc)) {
2980 ret = -EINVAL;
2981 goto out;
2982 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002983 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002984 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002985 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2986 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002987 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002988 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01002989
2990out:
2991 vcpu_put(vcpu);
2992 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002993}
2994
2995int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2996{
Christoffer Dall13931232017-12-04 21:35:34 +01002997 vcpu_load(vcpu);
2998
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002999 /* make sure we have the latest values */
3000 save_fpu_regs();
3001 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003002 convert_vx_to_fp((freg_t *) fpu->fprs,
3003 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003004 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003005 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003006 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003007
3008 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003009 return 0;
3010}
3011
3012static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3013{
3014 int rc = 0;
3015
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003016 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003017 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003018 else {
3019 vcpu->run->psw_mask = psw.mask;
3020 vcpu->run->psw_addr = psw.addr;
3021 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003022 return rc;
3023}
3024
3025int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3026 struct kvm_translation *tr)
3027{
3028 return -EINVAL; /* not implemented yet */
3029}
3030
David Hildenbrand27291e22014-01-23 12:26:52 +01003031#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3032 KVM_GUESTDBG_USE_HW_BP | \
3033 KVM_GUESTDBG_ENABLE)
3034
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003035int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3036 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003037{
David Hildenbrand27291e22014-01-23 12:26:52 +01003038 int rc = 0;
3039
Christoffer Dall66b56562017-12-04 21:35:33 +01003040 vcpu_load(vcpu);
3041
David Hildenbrand27291e22014-01-23 12:26:52 +01003042 vcpu->guest_debug = 0;
3043 kvm_s390_clear_bp_data(vcpu);
3044
Christoffer Dall66b56562017-12-04 21:35:33 +01003045 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3046 rc = -EINVAL;
3047 goto out;
3048 }
3049 if (!sclp.has_gpere) {
3050 rc = -EINVAL;
3051 goto out;
3052 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003053
3054 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3055 vcpu->guest_debug = dbg->control;
3056 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003057 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003058
3059 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3060 rc = kvm_s390_import_bp_data(vcpu, dbg);
3061 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003062 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003063 vcpu->arch.guestdbg.last_bp = 0;
3064 }
3065
3066 if (rc) {
3067 vcpu->guest_debug = 0;
3068 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003069 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003070 }
3071
Christoffer Dall66b56562017-12-04 21:35:33 +01003072out:
3073 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003074 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003075}
3076
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003077int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3078 struct kvm_mp_state *mp_state)
3079{
Christoffer Dallfd232562017-12-04 21:35:30 +01003080 int ret;
3081
3082 vcpu_load(vcpu);
3083
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003084 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003085 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3086 KVM_MP_STATE_OPERATING;
3087
3088 vcpu_put(vcpu);
3089 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003090}
3091
3092int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3093 struct kvm_mp_state *mp_state)
3094{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003095 int rc = 0;
3096
Christoffer Dalle83dff52017-12-04 21:35:31 +01003097 vcpu_load(vcpu);
3098
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003099 /* user space knows about this interface - let it control the state */
3100 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3101
3102 switch (mp_state->mp_state) {
3103 case KVM_MP_STATE_STOPPED:
3104 kvm_s390_vcpu_stop(vcpu);
3105 break;
3106 case KVM_MP_STATE_OPERATING:
3107 kvm_s390_vcpu_start(vcpu);
3108 break;
3109 case KVM_MP_STATE_LOAD:
3110 case KVM_MP_STATE_CHECK_STOP:
3111 /* fall through - CHECK_STOP and LOAD are not supported yet */
3112 default:
3113 rc = -ENXIO;
3114 }
3115
Christoffer Dalle83dff52017-12-04 21:35:31 +01003116 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003117 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003118}
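
/*
 * User-space sketch of driving the handler above via KVM_SET_MP_STATE;
 * vcpu_fd is assumed to be an open vcpu file descriptor. As implemented
 * above, the first such call also switches the VM to user-controlled
 * cpu state (user_cpu_state_ctrl = 1).
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int demo_stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mps = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mps);
}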
3119
David Hildenbrand8ad35752014-03-14 11:00:21 +01003120static bool ibs_enabled(struct kvm_vcpu *vcpu)
3121{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003122 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003123}
3124
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003125static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3126{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003127retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003128 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003129 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003130 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003131 /*
3132 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003133 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003134 * This ensures that the ipte instruction for this request has
3135 * already finished. We might race against a second unmapper that
 3136 * wants to set the blocking bit. Let's just retry the request loop.
3137 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003138 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003139 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003140 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3141 kvm_s390_get_prefix(vcpu),
3142 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003143 if (rc) {
3144 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003145 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003146 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003147 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003148 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003149
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003150 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3151 vcpu->arch.sie_block->ihcpu = 0xffff;
3152 goto retry;
3153 }
3154
David Hildenbrand8ad35752014-03-14 11:00:21 +01003155 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3156 if (!ibs_enabled(vcpu)) {
3157 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003158 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003159 }
3160 goto retry;
3161 }
3162
3163 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3164 if (ibs_enabled(vcpu)) {
3165 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003166 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003167 }
3168 goto retry;
3169 }
3170
David Hildenbrand6502a342016-06-21 14:19:51 +02003171 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3172 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3173 goto retry;
3174 }
3175
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003176 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3177 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003178 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003179 * instruction manually, in order to provide additional
3180 * functionalities needed for live migration.
3181 */
3182 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3183 goto retry;
3184 }
3185
3186 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3187 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003188 * Re-enable CMM virtualization if CMMA is available and
3189 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003190 */
3191 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003192 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003193 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3194 goto retry;
3195 }
3196
David Hildenbrand0759d062014-05-13 16:54:32 +02003197 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003198 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003199
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003200 return 0;
3201}
3202
David Hildenbrand0e7def52018-02-07 12:46:43 +01003203void kvm_s390_set_tod_clock(struct kvm *kvm,
3204 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003205{
3206 struct kvm_vcpu *vcpu;
3207 struct kvm_s390_tod_clock_ext htod;
3208 int i;
3209
3210 mutex_lock(&kvm->lock);
3211 preempt_disable();
3212
3213 get_tod_clock_ext((char *)&htod);
3214
3215 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003216 kvm->arch.epdx = 0;
3217 if (test_kvm_facility(kvm, 139)) {
3218 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3219 if (kvm->arch.epoch > gtod->tod)
3220 kvm->arch.epdx -= 1;
3221 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003222
3223 kvm_s390_vcpu_block_all(kvm);
3224 kvm_for_each_vcpu(i, vcpu, kvm) {
3225 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3226 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3227 }
3228
3229 kvm_s390_vcpu_unblock_all(kvm);
3230 preempt_enable();
3231 mutex_unlock(&kvm->lock);
3232}
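
/*
 * Sketch of the borrow-aware epoch computation in
 * kvm_s390_set_tod_clock() above: with the multiple-epoch facility
 * (139) the guest TOD is extended by an epoch index, and a wrap in the
 * 64-bit subtraction must borrow from it. Plain integers stand in for
 * the TOD-clock extension here.
 */
#include <stdint.h>

static void demo_epoch_diff(uint8_t guest_epoch_idx, uint64_t guest_tod,
			    uint8_t host_epoch_idx, uint64_t host_tod,
			    uint8_t *epdx, uint64_t *epoch)
{
	*epoch = guest_tod - host_tod;	/* may wrap modulo 2^64 */
	*epdx = guest_epoch_idx - host_epoch_idx;
	if (*epoch > guest_tod)		/* wrap happened: borrow an epoch */
		*epdx -= 1;
}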
3233
Thomas Huthfa576c52014-05-06 17:20:16 +02003234/**
3235 * kvm_arch_fault_in_page - fault-in guest page if necessary
3236 * @vcpu: The corresponding virtual cpu
3237 * @gpa: Guest physical address
3238 * @writable: Whether the page should be writable or not
3239 *
3240 * Make sure that a guest page has been faulted-in on the host.
3241 *
3242 * Return: Zero on success, negative error code otherwise.
3243 */
3244long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003245{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003246 return gmap_fault(vcpu->arch.gmap, gpa,
3247 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003248}
3249
Dominik Dingel3c038e62013-10-07 17:11:48 +02003250static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3251 unsigned long token)
3252{
3253 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003254 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003255
3256 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003257 irq.u.ext.ext_params2 = token;
3258 irq.type = KVM_S390_INT_PFAULT_INIT;
3259 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003260 } else {
3261 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003262 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003263 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3264 }
3265}
3266
3267void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3268 struct kvm_async_pf *work)
3269{
3270 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3271 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3272}
3273
3274void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3275 struct kvm_async_pf *work)
3276{
3277 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3278 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3279}
3280
3281void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3282 struct kvm_async_pf *work)
3283{
3284 /* s390 will always inject the page directly */
3285}
3286
3287bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3288{
3289 /*
3290 * s390 will always inject the page directly,
 3291 * but we still want check_async_completion to clean up
3292 */
3293 return true;
3294}
3295
3296static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3297{
3298 hva_t hva;
3299 struct kvm_arch_async_pf arch;
3300 int rc;
3301
3302 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3303 return 0;
3304 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3305 vcpu->arch.pfault_compare)
3306 return 0;
3307 if (psw_extint_disabled(vcpu))
3308 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003309 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003310 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003311 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003312 return 0;
3313 if (!vcpu->arch.gmap->pfault_enabled)
3314 return 0;
3315
Heiko Carstens81480cc2014-01-01 16:36:07 +01003316 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3317 hva += current->thread.gmap_addr & ~PAGE_MASK;
3318 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003319 return 0;
3320
3321 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3322 return rc;
3323}
3324
Thomas Huth3fb4c402013-09-12 10:33:43 +02003325static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003326{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003327 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003328
Dominik Dingel3c038e62013-10-07 17:11:48 +02003329 /*
3330 * On s390 notifications for arriving pages will be delivered directly
 3331 * to the guest, but the housekeeping for completed pfaults is
3332 * handled outside the worker.
3333 */
3334 kvm_check_async_pf_completion(vcpu);
3335
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003336 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3337 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003338
3339 if (need_resched())
3340 schedule();
3341
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003342 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003343 s390_handle_mcck();
3344
Jens Freimann79395032014-04-17 10:10:30 +02003345 if (!kvm_is_ucontrol(vcpu->kvm)) {
3346 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3347 if (rc)
3348 return rc;
3349 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003350
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003351 rc = kvm_s390_handle_requests(vcpu);
3352 if (rc)
3353 return rc;
3354
David Hildenbrand27291e22014-01-23 12:26:52 +01003355 if (guestdbg_enabled(vcpu)) {
3356 kvm_s390_backup_guest_per_regs(vcpu);
3357 kvm_s390_patch_guest_per_regs(vcpu);
3358 }
3359
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003360 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003361 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3362 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3363 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003364
Thomas Huth3fb4c402013-09-12 10:33:43 +02003365 return 0;
3366}
3367
Thomas Huth492d8642015-02-10 16:11:01 +01003368static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3369{
David Hildenbrand56317922016-01-12 17:37:58 +01003370 struct kvm_s390_pgm_info pgm_info = {
3371 .code = PGM_ADDRESSING,
3372 };
3373 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003374 int rc;
3375
3376 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3377 trace_kvm_s390_sie_fault(vcpu);
3378
3379 /*
3380 * We want to inject an addressing exception, which is defined as a
3381 * suppressing or terminating exception. However, since we came here
3382 * by a DAT access exception, the PSW still points to the faulting
3383 * instruction since DAT exceptions are nullifying. So we've got
3384 * to look up the current opcode to get the length of the instruction
3385 * to be able to forward the PSW.
3386 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003387 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003388 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003389 if (rc < 0) {
3390 return rc;
3391 } else if (rc) {
3392 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3393 * Forward by arbitrary ilc, injection will take care of
3394 * nullification if necessary.
3395 */
3396 pgm_info = vcpu->arch.pgm;
3397 ilen = 4;
3398 }
David Hildenbrand56317922016-01-12 17:37:58 +01003399 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3400 kvm_s390_forward_psw(vcpu, ilen);
3401 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003402}
3403
Thomas Huth3fb4c402013-09-12 10:33:43 +02003404static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3405{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003406 struct mcck_volatile_info *mcck_info;
3407 struct sie_page *sie_page;
3408
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003409 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3410 vcpu->arch.sie_block->icptcode);
3411 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3412
David Hildenbrand27291e22014-01-23 12:26:52 +01003413 if (guestdbg_enabled(vcpu))
3414 kvm_s390_restore_guest_per_regs(vcpu);
3415
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003416 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3417 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003418
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003419 if (exit_reason == -EINTR) {
3420 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3421 sie_page = container_of(vcpu->arch.sie_block,
3422 struct sie_page, sie_block);
3423 mcck_info = &sie_page->mcck_info;
3424 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3425 return 0;
3426 }
3427
David Hildenbrand71f116b2015-10-19 16:24:28 +02003428 if (vcpu->arch.sie_block->icptcode > 0) {
3429 int rc = kvm_handle_sie_intercept(vcpu);
3430
3431 if (rc != -EOPNOTSUPP)
3432 return rc;
3433 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3434 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3435 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3436 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3437 return -EREMOTE;
3438 } else if (exit_reason != -EFAULT) {
3439 vcpu->stat.exit_null++;
3440 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003441 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3442 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3443 vcpu->run->s390_ucontrol.trans_exc_code =
3444 current->thread.gmap_addr;
3445 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003446 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003447 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003448 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003449 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003450 if (kvm_arch_setup_async_pf(vcpu))
3451 return 0;
3452 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003453 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003454 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003455}
3456
3457static int __vcpu_run(struct kvm_vcpu *vcpu)
3458{
3459 int rc, exit_reason;
3460
Thomas Huth800c1062013-09-12 10:33:45 +02003461 /*
 3462 * We try to hold kvm->srcu during most of vcpu_run (except when
 3463 * running the guest), so that memslots (and other stuff) are protected
3464 */
3465 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3466
Thomas Hutha76ccff2013-09-12 10:33:44 +02003467 do {
3468 rc = vcpu_pre_run(vcpu);
3469 if (rc)
3470 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003471
Thomas Huth800c1062013-09-12 10:33:45 +02003472 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003473 /*
 3474 * As PF_VCPU will be used in the fault handler, there must be
 3475 * no uaccess between guest_enter and guest_exit.
3476 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003477 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003478 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003479 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003480 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003481 exit_reason = sie64a(vcpu->arch.sie_block,
3482 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003483 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003484 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003485 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003486 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003487 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003488
Thomas Hutha76ccff2013-09-12 10:33:44 +02003489 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003490 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003491
Thomas Huth800c1062013-09-12 10:33:45 +02003492 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003493 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003494}
3495
David Hildenbrandb028ee32014-07-17 10:47:43 +02003496static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3497{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003498 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003499 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003500
3501 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003502 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003503 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3504 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3505 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3506 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3507 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3508 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003509 /* some control register changes require a tlb flush */
3510 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003511 }
3512 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003513 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003514 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3515 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3516 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3517 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3518 }
3519 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3520 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3521 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3522 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003523 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3524 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003525 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003526 /*
3527 * If userspace sets the riccb (e.g. after migration) to a valid state,
3528 * we should enable RI here instead of doing the lazy enablement.
3529 */
3530 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003531 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003532 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003533 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003534 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003535 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003536 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003537 /*
3538 * If userspace sets the gscb (e.g. after migration) to non-zero,
3539 * we should enable GS here instead of doing the lazy enablement.
3540 */
3541 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3542 test_kvm_facility(vcpu->kvm, 133) &&
3543 gscb->gssm &&
3544 !vcpu->arch.gs_enabled) {
3545 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3546 vcpu->arch.sie_block->ecb |= ECB_GS;
3547 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3548 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003549 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003550 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3551 test_kvm_facility(vcpu->kvm, 82)) {
3552 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3553 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3554 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003555 save_access_regs(vcpu->arch.host_acrs);
3556 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003557 /* save host (userspace) fprs/vrs */
3558 save_fpu_regs();
3559 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3560 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3561 if (MACHINE_HAS_VX)
3562 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3563 else
3564 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3565 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3566 if (test_fp_ctl(current->thread.fpu.fpc))
3567 /* User space provided an invalid FPC, let's clear it */
3568 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003569 if (MACHINE_HAS_GS) {
3570 preempt_disable();
3571 __ctl_set_bit(2, 4);
3572 if (current->thread.gs_cb) {
3573 vcpu->arch.host_gscb = current->thread.gs_cb;
3574 save_gs_cb(vcpu->arch.host_gscb);
3575 }
3576 if (vcpu->arch.gs_enabled) {
3577 current->thread.gs_cb = (struct gs_cb *)
3578 &vcpu->run->s.regs.gscb;
3579 restore_gs_cb(current->thread.gs_cb);
3580 }
3581 preempt_enable();
3582 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003583 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003584
David Hildenbrandb028ee32014-07-17 10:47:43 +02003585 kvm_run->kvm_dirty_regs = 0;
3586}
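
/*
 * User-space sketch: sync_regs() above consumes the kvm_run dirty-regs
 * protocol. run is assumed to be the mmap()ed struct kvm_run of an s390
 * vcpu; setting the flag makes the next KVM_RUN pick up the new prefix.
 */
#include <linux/kvm.h>

static void demo_set_guest_prefix(struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
}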
3587
3588static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3589{
3590 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3591 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3592 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3593 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003594 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003595 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3596 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3597 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3598 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3599 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3600 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3601 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003602 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003603 save_access_regs(vcpu->run->s.regs.acrs);
3604 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003605 /* Save guest register state */
3606 save_fpu_regs();
3607 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3608 /* Restore will be done lazily at return */
3609 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3610 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003611 if (MACHINE_HAS_GS) {
3612 __ctl_set_bit(2, 4);
3613 if (vcpu->arch.gs_enabled)
3614 save_gs_cb(current->thread.gs_cb);
3615 preempt_disable();
3616 current->thread.gs_cb = vcpu->arch.host_gscb;
3617 restore_gs_cb(vcpu->arch.host_gscb);
3618 preempt_enable();
3619 if (!vcpu->arch.host_gscb)
3620 __ctl_clear_bit(2, 4);
3621 vcpu->arch.host_gscb = NULL;
3622 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003623 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003624}
3625
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

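/*
 * Illustrative userspace invocation (a sketch; vcpu_fd is a placeholder):
 * the KVM_S390_STORE_STATUS ioctl passes the target address as the plain
 * ioctl argument, e.g.
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 *
 * which reaches the function below via kvm_arch_vcpu_ioctl().
 */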
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

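/*
 * Helpers for the IBS facility (see sclp.has_ibs). As the callers in
 * kvm_s390_vcpu_start()/stop() below suggest, IBS speeds up a vcpu that
 * is the only one running and has to be switched off again as soon as a
 * second vcpu starts. Enabling/disabling is driven through vcpu requests
 * that are processed before the next guest entry.
 */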
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

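/*
 * Move a vcpu from the STOPPED to the OPERATING state. The started_vcpus
 * count computed below decides the IBS policy: 0 means we become the only
 * runner and may enable IBS; 1 means a second vcpu is coming up, so IBS
 * first has to go away on all vcpus.
 */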
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

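/*
 * Per-vcpu KVM_ENABLE_CAP. Only KVM_CAP_S390_CSS_SUPPORT is handled here:
 * it marks the VM (the flag is per VM, although enabled through a vcpu
 * ioctl) so that channel I/O instructions are intercepted and handed to
 * userspace for emulation.
 */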
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

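/*
 * A sketch of how userspace drives the handler below; vcpu_fd, buffer and
 * the sample values are illustrative, not taken from this file:
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x1000,	// guest logical address to access
 *		.size  = 512,		// number of bytes
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.ar    = 0,		// access register number
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY set no data is copied; the range is
 * only translated and access-checked.
 */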
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

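/*
 * "Async" vcpu ioctls are dispatched by common code without taking the
 * vcpu mutex (no vcpu_load()), so interrupt injection does not block on
 * a vcpu that is busy in the SIE. Everything not handled here falls
 * through to kvm_arch_vcpu_ioctl() below via -ENOIOCTLCMD.
 */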
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

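/*
 * Backs mmap() on the vcpu fd. On s390 this only has a user for ucontrol
 * VMs, where the page at offset KVM_S390_SIE_PAGE_OFFSET exposes the
 * vcpu's SIE control block to userspace; everything else gets SIGBUS.
 */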
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
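/*
 * Rough example of the checks below (values made up for illustration):
 * a slot with a 1 MB aligned userspace_addr and memory_size = 256 MB is
 * accepted, while any slot whose userspace start or size is not a
 * multiple of 1 MB, or that ends above kvm->arch.mem_limit, is rejected
 * with -EINVAL.
 */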
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1 MB). The memory in userland may be fragmented
	   into various different vmas, and it is fine to mmap() and munmap()
	   ranges within this slot at any time after this call. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

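/*
 * For facility doubleword i, two adjacent hmfai bits select how much of
 * the doubleword stays visible: a pair value of 0, 1, 2 or 3 yields a
 * mask of 48, 32, 16 or 0 one-bits. Worked example, assuming this
 * reading: a pair value of 1 gives
 * 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL, keeping only the
 * low 32 bits of S390_lowcore.stfle_fac_list[i].
 */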
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

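/*
 * nested (vSIE) and hpage (huge page backing) are module parameters
 * defined earlier in this file. The check below makes them mutually
 * exclusive at load time, so an invocation along the lines of
 * "modprobe kvm nested=1 hpage=1" (illustrative) fails with -EINVAL.
 */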
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");