blob: a6230b00c1dfaa93988d9884254faba0103dba1d [file] [log] [blame]
Greg Kroah-Hartmand809aa22017-11-24 15:00:33 +01001// SPDX-License-Identifier: GPL-2.0
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002/*
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003 * hosting IBM Z kernel virtual machines (s390x)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004 *
Christian Borntraegera37cb072018-01-23 13:28:40 +01005 * Copyright IBM Corp. 2008, 2018
Heiko Carstensb0c632d2008-03-25 18:47:20 +01006 *
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Christian Borntraeger <borntraeger@de.ibm.com>
9 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020010 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040011 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010012 */
13
14#include <linux/compiler.h>
15#include <linux/err.h>
16#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020017#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010018#include <linux/init.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +010021#include <linux/mman.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010022#include <linux/module.h>
Paul Gortmakerd3217962017-02-09 15:20:25 -050023#include <linux/moduleparam.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020024#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010025#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010027#include <linux/vmalloc.h>
David Hildenbrand15c97052015-03-19 17:36:43 +010028#include <linux/bitmap.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010029#include <linux/sched/signal.h>
Claudio Imbrenda190df4a2016-08-04 17:54:42 +020030#include <linux/string.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010031
Heiko Carstenscbb870c2010-02-26 22:37:43 +010032#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010033#include <asm/lowcore.h>
Martin Schwidefskyfd5ada02016-05-31 15:06:51 +020034#include <asm/stp.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include <asm/pgtable.h>
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +010036#include <asm/gmap.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010037#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010038#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020039#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020040#include <asm/sclp.h>
David Hildenbrand0a763c72016-05-18 16:03:47 +020041#include <asm/cpacf.h>
Linus Torvalds221bb8a2016-08-02 16:11:27 -040042#include <asm/timex.h>
Tony Krowiake585b242018-09-25 19:16:18 -040043#include <asm/ap.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010044#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010045#include "gaccess.h"
46
David Hildenbrandea2cdd22015-05-20 13:24:02 +020047#define KMSG_COMPONENT "kvm-s390"
48#undef pr_fmt
49#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
50
Cornelia Huck5786fff2012-07-23 17:20:29 +020051#define CREATE_TRACE_POINTS
52#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020053#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020054
Thomas Huth41408c282015-02-06 15:01:21 +010055#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010056#define LOCAL_IRQS 32
57#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
58 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010059
Heiko Carstensb0c632d2008-03-25 18:47:20 +010060#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
Christian Borntraegerccc40c52018-03-08 12:48:05 +000061#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
Heiko Carstensb0c632d2008-03-25 18:47:20 +010062
63struct kvm_stats_debugfs_item debugfs_entries[] = {
64 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020065 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010066 { "exit_validity", VCPU_STAT(exit_validity) },
67 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
68 { "exit_external_request", VCPU_STAT(exit_external_request) },
Christian Borntraegera5e0acea92018-02-23 07:57:33 +000069 { "exit_io_request", VCPU_STAT(exit_io_request) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010070 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010071 { "exit_instruction", VCPU_STAT(exit_instruction) },
Alexander Yarygin9ec6de12016-05-06 16:33:06 +030072 { "exit_pei", VCPU_STAT(exit_pei) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010073 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
74 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Janosch Franka011eeb2016-05-09 14:14:01 +020075 { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
Paolo Bonzinif7819512015-02-04 18:20:58 +010076 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
Paolo Bonzini62bea5b2015-09-15 18:27:57 +020077 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
Christian Borntraeger3491caf2016-05-13 12:16:35 +020078 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020079 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020080 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010081 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010082 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
83 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000084 { "deliver_ckc", VCPU_STAT(deliver_ckc) },
85 { "deliver_cputm", VCPU_STAT(deliver_cputm) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010086 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020087 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010088 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000089 { "deliver_virtio", VCPU_STAT(deliver_virtio) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010090 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
91 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
92 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000093 { "deliver_program", VCPU_STAT(deliver_program) },
94 { "deliver_io", VCPU_STAT(deliver_io) },
QingFeng Hao32de0742018-03-02 11:56:47 +010095 { "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010096 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000097 { "inject_ckc", VCPU_STAT(inject_ckc) },
98 { "inject_cputm", VCPU_STAT(inject_cputm) },
99 { "inject_external_call", VCPU_STAT(inject_external_call) },
100 { "inject_float_mchk", VM_STAT(inject_float_mchk) },
101 { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
102 { "inject_io", VM_STAT(inject_io) },
103 { "inject_mchk", VCPU_STAT(inject_mchk) },
104 { "inject_pfault_done", VM_STAT(inject_pfault_done) },
105 { "inject_program", VCPU_STAT(inject_program) },
106 { "inject_restart", VCPU_STAT(inject_restart) },
107 { "inject_service_signal", VM_STAT(inject_service_signal) },
108 { "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
109 { "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
110 { "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
111 { "inject_virtio", VM_STAT(inject_virtio) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100112 { "instruction_epsw", VCPU_STAT(instruction_epsw) },
113 { "instruction_gs", VCPU_STAT(instruction_gs) },
114 { "instruction_io_other", VCPU_STAT(instruction_io_other) },
115 { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
116 { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200117 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100118 { "instruction_ptff", VCPU_STAT(instruction_ptff) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100119 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100120 { "instruction_sck", VCPU_STAT(instruction_sck) },
121 { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100122 { "instruction_spx", VCPU_STAT(instruction_spx) },
123 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
124 { "instruction_stap", VCPU_STAT(instruction_stap) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100125 { "instruction_iske", VCPU_STAT(instruction_iske) },
126 { "instruction_ri", VCPU_STAT(instruction_ri) },
127 { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
128 { "instruction_sske", VCPU_STAT(instruction_sske) },
Heiko Carstens8a2422342014-01-10 14:33:28 +0100129 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200130 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100131 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
132 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100133 { "instruction_tb", VCPU_STAT(instruction_tb) },
134 { "instruction_tpi", VCPU_STAT(instruction_tpi) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +0200135 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100136 { "instruction_tsch", VCPU_STAT(instruction_tsch) },
Janosch Frank95ca2cb2016-05-23 15:11:58 +0200137 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
David Hildenbranda3508fb2015-07-08 13:19:48 +0200138 { "instruction_sie", VCPU_STAT(instruction_sie) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100139 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +0100140 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +0200141 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100142 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200143 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
144 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100145 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200146 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
147 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Eric Farmancd7b4b62015-02-12 09:06:34 -0500148 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100149 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
150 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
151 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200152 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
153 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
154 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger866c1382018-01-24 12:27:01 +0100155 { "instruction_diag_10", VCPU_STAT(diagnose_10) },
156 { "instruction_diag_44", VCPU_STAT(diagnose_44) },
157 { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
158 { "instruction_diag_258", VCPU_STAT(diagnose_258) },
159 { "instruction_diag_308", VCPU_STAT(diagnose_308) },
160 { "instruction_diag_500", VCPU_STAT(diagnose_500) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100161 { "instruction_diag_other", VCPU_STAT(diagnose_other) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100162 { NULL }
163};
164
Collin L. Walling8fa16962016-07-26 15:29:44 -0400165struct kvm_s390_tod_clock_ext {
166 __u8 epoch_idx;
167 __u64 tod;
168 __u8 reserved[7];
169} __packed;
170
David Hildenbranda411edf2016-02-02 15:41:22 +0100171/* allow nested virtualization in KVM (if enabled by user space) */
172static int nested;
173module_param(nested, int, S_IRUGO);
174MODULE_PARM_DESC(nested, "Nested virtualization support");
175
Janosch Franka4499382018-07-13 11:28:31 +0100176/* allow 1m huge page guest backing, if !nested */
177static int hpage;
178module_param(hpage, int, 0444);
179MODULE_PARM_DESC(hpage, "1m huge page backing support");
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100180
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +0000181/*
182 * For now we handle at most 16 double words as this is what the s390 base
183 * kernel handles and stores in the prefix page. If we ever need to go beyond
184 * this, this requires changes to code, but the external uapi can stay.
185 */
186#define SIZE_INTERNAL 16
187
188/*
189 * Base feature mask that defines default mask for facilities. Consists of the
190 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
191 */
192static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
193/*
194 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
195 * and defines the facilities that can be enabled via a cpu model.
196 */
197static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
198
199static unsigned long kvm_s390_fac_size(void)
Michael Mueller78c4b592013-07-26 15:04:04 +0200200{
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +0000201 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
202 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
203 BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
204 sizeof(S390_lowcore.stfle_fac_list));
205
206 return SIZE_INTERNAL;
Michael Mueller78c4b592013-07-26 15:04:04 +0200207}
208
David Hildenbrand15c97052015-03-19 17:36:43 +0100209/* available cpu features supported by kvm */
210static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200211/* available subfunctions indicated via query / "test bit" */
212static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
David Hildenbrand15c97052015-03-19 17:36:43 +0100213
Michael Mueller9d8d5782015-02-02 15:42:51 +0100214static struct gmap_notifier gmap_notifier;
David Hildenbranda3508fb2015-07-08 13:19:48 +0200215static struct gmap_notifier vsie_gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200216debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100217
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100218/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200219int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100220{
221 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200222 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100223}
224
Martin Schwidefsky414d3b02016-03-08 11:52:54 +0100225static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
226 unsigned long end);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200227
David Hildenbrand15757672018-02-07 12:46:45 +0100228static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
229{
230 u8 delta_idx = 0;
231
232 /*
233 * The TOD jumps by delta, we have to compensate this by adding
234 * -delta to the epoch.
235 */
236 delta = -delta;
237
238 /* sign-extension - we're adding to signed values below */
239 if ((s64)delta < 0)
240 delta_idx = -1;
241
242 scb->epoch += delta;
243 if (scb->ecd & ECD_MEF) {
244 scb->epdx += delta_idx;
245 if (scb->epoch < delta)
246 scb->epdx += 1;
247 }
248}
249
Fan Zhangfdf03652015-05-13 10:58:41 +0200250/*
251 * This callback is executed during stop_machine(). All CPUs are therefore
252 * temporarily stopped. In order not to change guest behavior, we have to
253 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
254 * so a CPU won't be stopped while calculating with the epoch.
255 */
256static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
257 void *v)
258{
259 struct kvm *kvm;
260 struct kvm_vcpu *vcpu;
261 int i;
262 unsigned long long *delta = v;
263
264 list_for_each_entry(kvm, &vm_list, vm_list) {
Fan Zhangfdf03652015-05-13 10:58:41 +0200265 kvm_for_each_vcpu(i, vcpu, kvm) {
David Hildenbrand15757672018-02-07 12:46:45 +0100266 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
267 if (i == 0) {
268 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
269 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
270 }
David Hildenbranddb0758b2016-02-15 09:42:25 +0100271 if (vcpu->arch.cputm_enabled)
272 vcpu->arch.cputm_start += *delta;
David Hildenbrand91473b42015-10-29 10:30:36 +0100273 if (vcpu->arch.vsie_block)
David Hildenbrand15757672018-02-07 12:46:45 +0100274 kvm_clock_sync_scb(vcpu->arch.vsie_block,
275 *delta);
Fan Zhangfdf03652015-05-13 10:58:41 +0200276 }
277 }
278 return NOTIFY_OK;
279}
280
281static struct notifier_block kvm_clock_notifier = {
282 .notifier_call = kvm_clock_sync,
283};
284
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100285int kvm_arch_hardware_setup(void)
286{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200287 gmap_notifier.notifier_call = kvm_gmap_notifier;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +0100288 gmap_register_pte_notifier(&gmap_notifier);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200289 vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
290 gmap_register_pte_notifier(&vsie_gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200291 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
292 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100293 return 0;
294}
295
296void kvm_arch_hardware_unsetup(void)
297{
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +0100298 gmap_unregister_pte_notifier(&gmap_notifier);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200299 gmap_unregister_pte_notifier(&vsie_gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200300 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
301 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100302}
303
David Hildenbrand22be5a132016-01-21 13:22:54 +0100304static void allow_cpu_feat(unsigned long nr)
305{
306 set_bit_inv(nr, kvm_s390_available_cpu_feat);
307}
308
David Hildenbrand0a763c72016-05-18 16:03:47 +0200309static inline int plo_test_bit(unsigned char nr)
310{
311 register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
Heiko Carstensd051ae52016-12-13 14:25:32 +0100312 int cc;
David Hildenbrand0a763c72016-05-18 16:03:47 +0200313
314 asm volatile(
315 /* Parameter registers are ignored for "test bit" */
316 " plo 0,0,0,0(0)\n"
317 " ipm %0\n"
318 " srl %0,28\n"
319 : "=d" (cc)
320 : "d" (r0)
321 : "cc");
322 return cc == 0;
323}
324
David Hildenbrand22be5a132016-01-21 13:22:54 +0100325static void kvm_s390_cpu_feat_init(void)
326{
David Hildenbrand0a763c72016-05-18 16:03:47 +0200327 int i;
328
329 for (i = 0; i < 256; ++i) {
330 if (plo_test_bit(i))
331 kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
332 }
333
334 if (test_facility(28)) /* TOD-clock steering */
Linus Torvalds221bb8a2016-08-02 16:11:27 -0400335 ptff(kvm_s390_available_subfunc.ptff,
336 sizeof(kvm_s390_available_subfunc.ptff),
337 PTFF_QAF);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200338
339 if (test_facility(17)) { /* MSA */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200340 __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
341 kvm_s390_available_subfunc.kmac);
342 __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
343 kvm_s390_available_subfunc.kmc);
344 __cpacf_query(CPACF_KM, (cpacf_mask_t *)
345 kvm_s390_available_subfunc.km);
346 __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
347 kvm_s390_available_subfunc.kimd);
348 __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
349 kvm_s390_available_subfunc.klmd);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200350 }
351 if (test_facility(76)) /* MSA3 */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200352 __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
353 kvm_s390_available_subfunc.pckmo);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200354 if (test_facility(77)) { /* MSA4 */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200355 __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
356 kvm_s390_available_subfunc.kmctr);
357 __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
358 kvm_s390_available_subfunc.kmf);
359 __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
360 kvm_s390_available_subfunc.kmo);
361 __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
362 kvm_s390_available_subfunc.pcc);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200363 }
364 if (test_facility(57)) /* MSA5 */
Harald Freudenberger985a9d22017-02-24 10:11:54 +0100365 __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200366 kvm_s390_available_subfunc.ppno);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200367
Jason J. Hernee000b8e2017-03-20 09:57:42 -0400368 if (test_facility(146)) /* MSA8 */
369 __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
370 kvm_s390_available_subfunc.kma);
371
David Hildenbrand22be5a132016-01-21 13:22:54 +0100372 if (MACHINE_HAS_ESOP)
373 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200374 /*
375 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
376 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
377 */
378 if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
David Hildenbranda411edf2016-02-02 15:41:22 +0100379 !test_facility(3) || !nested)
David Hildenbranda3508fb2015-07-08 13:19:48 +0200380 return;
381 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
David Hildenbrand19c439b2015-11-25 11:02:26 +0100382 if (sclp.has_64bscao)
383 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
David Hildenbrand0615a322015-11-25 09:59:49 +0100384 if (sclp.has_siif)
385 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
David Hildenbrand77d18f62015-11-24 16:32:35 +0100386 if (sclp.has_gpere)
387 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
David Hildenbranda1b7b9b2015-11-24 16:41:33 +0100388 if (sclp.has_gsls)
389 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
David Hildenbrand5630a8e2015-11-24 16:53:51 +0100390 if (sclp.has_ib)
391 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
David Hildenbrand13ee3f62015-11-24 16:54:37 +0100392 if (sclp.has_cei)
393 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
David Hildenbrand7fd7f392015-11-24 16:56:23 +0100394 if (sclp.has_ibs)
395 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
Farhan Ali730cd632017-02-24 16:12:56 -0500396 if (sclp.has_kss)
397 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
David Hildenbrand5d3876a2016-04-13 17:06:50 +0200398 /*
399 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
400 * all skey handling functions read/set the skey from the PGSTE
401 * instead of the real storage key.
402 *
403 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
404 * pages being detected as preserved although they are resident.
405 *
406 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
407 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
408 *
409 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
410 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
411 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
412 *
413 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
414 * cannot easily shadow the SCA because of the ipte lock.
415 */
David Hildenbrand22be5a132016-01-21 13:22:54 +0100416}
417
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100418int kvm_arch_init(void *opaque)
419{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200420 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
421 if (!kvm_s390_dbf)
422 return -ENOMEM;
423
424 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
425 debug_unregister(kvm_s390_dbf);
426 return -ENOMEM;
427 }
428
David Hildenbrand22be5a132016-01-21 13:22:54 +0100429 kvm_s390_cpu_feat_init();
430
Cornelia Huck84877d92014-09-02 10:27:35 +0100431 /* Register floating interrupt controller interface. */
432 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100433}
434
Christian Borntraeger78f26132015-07-22 15:50:58 +0200435void kvm_arch_exit(void)
436{
437 debug_unregister(kvm_s390_dbf);
438}
439
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100440/* Section: device related */
441long kvm_arch_dev_ioctl(struct file *filp,
442 unsigned int ioctl, unsigned long arg)
443{
444 if (ioctl == KVM_S390_ENABLE_SIE)
445 return s390_enable_sie();
446 return -EINVAL;
447}
448
Alexander Graf784aa3d2014-07-14 18:27:35 +0200449int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100450{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100451 int r;
452
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200453 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100454 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200455 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100456 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100457#ifdef CONFIG_KVM_S390_UCONTROL
458 case KVM_CAP_S390_UCONTROL:
459#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200460 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100461 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200462 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100463 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100464 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100465 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200466 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200467 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200468 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200469 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200470 case KVM_CAP_MP_STATE:
Paolo Bonzini460df4c2017-02-08 11:50:15 +0100471 case KVM_CAP_IMMEDIATE_EXIT:
Jens Freimann47b43c52014-11-11 20:57:06 +0100472 case KVM_CAP_S390_INJECT_IRQ:
David Hildenbrand2444b352014-10-09 14:10:13 +0200473 case KVM_CAP_S390_USER_SIGP:
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100474 case KVM_CAP_S390_USER_STSI:
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400475 case KVM_CAP_S390_SKEYS:
Jens Freimann816c7662014-11-24 17:13:46 +0100476 case KVM_CAP_S390_IRQ_STATE:
David Hildenbrand6502a342016-06-21 14:19:51 +0200477 case KVM_CAP_S390_USER_INSTR0:
Claudio Imbrenda4036e382016-08-04 17:58:47 +0200478 case KVM_CAP_S390_CMMA_MIGRATION:
Yi Min Zhao47a46932017-03-10 09:29:38 +0100479 case KVM_CAP_S390_AIS:
Christian Borntraegerda9a1442017-11-09 10:00:45 +0100480 case KVM_CAP_S390_AIS_MIGRATION:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100481 r = 1;
482 break;
Janosch Franka4499382018-07-13 11:28:31 +0100483 case KVM_CAP_S390_HPAGE_1M:
484 r = 0;
485 if (hpage)
486 r = 1;
487 break;
Thomas Huth41408c282015-02-06 15:01:21 +0100488 case KVM_CAP_S390_MEM_OP:
489 r = MEM_OP_MAX_SIZE;
490 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200491 case KVM_CAP_NR_VCPUS:
492 case KVM_CAP_MAX_VCPUS:
David Hildenbrand76a6dd72015-11-24 13:33:49 +0100493 r = KVM_S390_BSCA_CPU_SLOTS;
David Hildenbranda6940672016-08-08 22:39:32 +0200494 if (!kvm_s390_use_sca_entries())
495 r = KVM_MAX_VCPUS;
496 else if (sclp.has_esca && sclp.has_64bscao)
David Hildenbrand76a6dd72015-11-24 13:33:49 +0100497 r = KVM_S390_ESCA_CPU_SLOTS;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200498 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100499 case KVM_CAP_NR_MEMSLOTS:
500 r = KVM_USER_MEM_SLOTS;
501 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200502 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100503 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200504 break;
Eric Farman68c55752014-06-09 10:57:26 -0400505 case KVM_CAP_S390_VECTOR_REGISTERS:
506 r = MACHINE_HAS_VX;
507 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800508 case KVM_CAP_S390_RI:
509 r = test_facility(64);
510 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100511 case KVM_CAP_S390_GS:
512 r = test_facility(133);
513 break;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +0100514 case KVM_CAP_S390_BPB:
515 r = test_facility(82);
516 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200517 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100518 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200519 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100520 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100521}
522
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400523static void kvm_s390_sync_dirty_log(struct kvm *kvm,
Janosch Frank0959e162018-07-17 13:21:22 +0100524 struct kvm_memory_slot *memslot)
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400525{
Janosch Frank0959e162018-07-17 13:21:22 +0100526 int i;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400527 gfn_t cur_gfn, last_gfn;
Janosch Frank0959e162018-07-17 13:21:22 +0100528 unsigned long gaddr, vmaddr;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400529 struct gmap *gmap = kvm->arch.gmap;
Janosch Frank0959e162018-07-17 13:21:22 +0100530 DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400531
Janosch Frank0959e162018-07-17 13:21:22 +0100532 /* Loop over all guest segments */
533 cur_gfn = memslot->base_gfn;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400534 last_gfn = memslot->base_gfn + memslot->npages;
Janosch Frank0959e162018-07-17 13:21:22 +0100535 for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
536 gaddr = gfn_to_gpa(cur_gfn);
537 vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
538 if (kvm_is_error_hva(vmaddr))
539 continue;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400540
Janosch Frank0959e162018-07-17 13:21:22 +0100541 bitmap_zero(bitmap, _PAGE_ENTRIES);
542 gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
543 for (i = 0; i < _PAGE_ENTRIES; i++) {
544 if (test_bit(i, bitmap))
545 mark_page_dirty(kvm, cur_gfn + i);
546 }
547
Christian Borntraeger1763f8d2016-02-03 11:12:34 +0100548 if (fatal_signal_pending(current))
549 return;
Christian Borntraeger70c88a02016-02-02 15:15:56 +0100550 cond_resched();
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400551 }
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400552}
553
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100554/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200555static void sca_del_vcpu(struct kvm_vcpu *vcpu);
556
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100557/*
558 * Get (and clear) the dirty memory log for a memory slot.
559 */
560int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
561 struct kvm_dirty_log *log)
562{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400563 int r;
564 unsigned long n;
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200565 struct kvm_memslots *slots;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400566 struct kvm_memory_slot *memslot;
567 int is_dirty = 0;
568
Janosch Franke1e8a962017-02-02 16:39:31 +0100569 if (kvm_is_ucontrol(kvm))
570 return -EINVAL;
571
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400572 mutex_lock(&kvm->slots_lock);
573
574 r = -EINVAL;
575 if (log->slot >= KVM_USER_MEM_SLOTS)
576 goto out;
577
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200578 slots = kvm_memslots(kvm);
579 memslot = id_to_memslot(slots, log->slot);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400580 r = -ENOENT;
581 if (!memslot->dirty_bitmap)
582 goto out;
583
584 kvm_s390_sync_dirty_log(kvm, memslot);
585 r = kvm_get_dirty_log(kvm, log, &is_dirty);
586 if (r)
587 goto out;
588
589 /* Clear the dirty log */
590 if (is_dirty) {
591 n = kvm_dirty_bitmap_bytes(memslot);
592 memset(memslot->dirty_bitmap, 0, n);
593 }
594 r = 0;
595out:
596 mutex_unlock(&kvm->slots_lock);
597 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100598}
599
David Hildenbrand6502a342016-06-21 14:19:51 +0200600static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
601{
602 unsigned int i;
603 struct kvm_vcpu *vcpu;
604
605 kvm_for_each_vcpu(i, vcpu, kvm) {
606 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
607 }
608}
609
Cornelia Huckd938dc52013-10-23 18:26:34 +0200610static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
611{
612 int r;
613
614 if (cap->flags)
615 return -EINVAL;
616
617 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200618 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200619 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200620 kvm->arch.use_irqchip = 1;
621 r = 0;
622 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200623 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200624 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200625 kvm->arch.user_sigp = 1;
626 r = 0;
627 break;
Eric Farman68c55752014-06-09 10:57:26 -0400628 case KVM_CAP_S390_VECTOR_REGISTERS:
David Hildenbrand5967c172015-11-06 12:08:48 +0100629 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200630 if (kvm->created_vcpus) {
David Hildenbrand5967c172015-11-06 12:08:48 +0100631 r = -EBUSY;
632 } else if (MACHINE_HAS_VX) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100633 set_kvm_facility(kvm->arch.model.fac_mask, 129);
634 set_kvm_facility(kvm->arch.model.fac_list, 129);
Guenther Hutzl2f87d942016-06-03 14:37:17 +0200635 if (test_facility(134)) {
636 set_kvm_facility(kvm->arch.model.fac_mask, 134);
637 set_kvm_facility(kvm->arch.model.fac_list, 134);
638 }
Maxim Samoylov53743aa2016-02-10 10:31:23 +0100639 if (test_facility(135)) {
640 set_kvm_facility(kvm->arch.model.fac_mask, 135);
641 set_kvm_facility(kvm->arch.model.fac_list, 135);
642 }
Michael Mueller18280d82015-03-16 16:05:41 +0100643 r = 0;
644 } else
645 r = -EINVAL;
David Hildenbrand5967c172015-11-06 12:08:48 +0100646 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200647 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
648 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400649 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800650 case KVM_CAP_S390_RI:
651 r = -EINVAL;
652 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200653 if (kvm->created_vcpus) {
Fan Zhangc6e5f162016-01-07 18:24:29 +0800654 r = -EBUSY;
655 } else if (test_facility(64)) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100656 set_kvm_facility(kvm->arch.model.fac_mask, 64);
657 set_kvm_facility(kvm->arch.model.fac_list, 64);
Fan Zhangc6e5f162016-01-07 18:24:29 +0800658 r = 0;
659 }
660 mutex_unlock(&kvm->lock);
661 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
662 r ? "(not available)" : "(success)");
663 break;
Yi Min Zhao47a46932017-03-10 09:29:38 +0100664 case KVM_CAP_S390_AIS:
665 mutex_lock(&kvm->lock);
666 if (kvm->created_vcpus) {
667 r = -EBUSY;
668 } else {
669 set_kvm_facility(kvm->arch.model.fac_mask, 72);
670 set_kvm_facility(kvm->arch.model.fac_list, 72);
Yi Min Zhao47a46932017-03-10 09:29:38 +0100671 r = 0;
672 }
673 mutex_unlock(&kvm->lock);
674 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
675 r ? "(not available)" : "(success)");
676 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100677 case KVM_CAP_S390_GS:
678 r = -EINVAL;
679 mutex_lock(&kvm->lock);
Christian Borntraeger241e3ec02017-11-16 15:12:52 +0100680 if (kvm->created_vcpus) {
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100681 r = -EBUSY;
682 } else if (test_facility(133)) {
683 set_kvm_facility(kvm->arch.model.fac_mask, 133);
684 set_kvm_facility(kvm->arch.model.fac_list, 133);
685 r = 0;
686 }
687 mutex_unlock(&kvm->lock);
688 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
689 r ? "(not available)" : "(success)");
690 break;
Janosch Franka4499382018-07-13 11:28:31 +0100691 case KVM_CAP_S390_HPAGE_1M:
692 mutex_lock(&kvm->lock);
693 if (kvm->created_vcpus)
694 r = -EBUSY;
695 else if (!hpage || kvm->arch.use_cmma)
696 r = -EINVAL;
697 else {
698 r = 0;
Janosch Frankdf88f312018-08-30 16:14:18 +0200699 down_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100700 kvm->mm->context.allow_gmap_hpage_1m = 1;
Janosch Frankdf88f312018-08-30 16:14:18 +0200701 up_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100702 /*
703 * We might have to create fake 4k page
704 * tables. To avoid that the hardware works on
705 * stale PGSTEs, we emulate these instructions.
706 */
707 kvm->arch.use_skf = 0;
708 kvm->arch.use_pfmfi = 0;
709 }
710 mutex_unlock(&kvm->lock);
711 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
712 r ? "(not available)" : "(success)");
713 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100714 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200715 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100716 kvm->arch.user_stsi = 1;
717 r = 0;
718 break;
David Hildenbrand6502a342016-06-21 14:19:51 +0200719 case KVM_CAP_S390_USER_INSTR0:
720 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
721 kvm->arch.user_instr0 = 1;
722 icpt_operexc_on_all_vcpus(kvm);
723 r = 0;
724 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200725 default:
726 r = -EINVAL;
727 break;
728 }
729 return r;
730}
731
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100732static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
733{
734 int ret;
735
736 switch (attr->attr) {
737 case KVM_S390_VM_MEM_LIMIT_SIZE:
738 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200739 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
Dominik Dingela3a92c32014-12-01 17:24:42 +0100740 kvm->arch.mem_limit);
741 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100742 ret = -EFAULT;
743 break;
744 default:
745 ret = -ENXIO;
746 break;
747 }
748 return ret;
749}
750
751static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200752{
753 int ret;
754 unsigned int idx;
755 switch (attr->attr) {
756 case KVM_S390_VM_MEM_ENABLE_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100757 ret = -ENXIO;
David Hildenbrandc24cc9c2015-11-24 13:53:04 +0100758 if (!sclp.has_cmma)
Dominik Dingele6db1d62015-05-07 15:41:57 +0200759 break;
760
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200761 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200762 mutex_lock(&kvm->lock);
Janosch Franka4499382018-07-13 11:28:31 +0100763 if (kvm->created_vcpus)
764 ret = -EBUSY;
765 else if (kvm->mm->context.allow_gmap_hpage_1m)
766 ret = -EINVAL;
767 else {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200768 kvm->arch.use_cmma = 1;
Janosch Frankc9f0a2b2018-02-16 12:16:14 +0100769 /* Not compatible with cmma. */
770 kvm->arch.use_pfmfi = 0;
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200771 ret = 0;
772 }
773 mutex_unlock(&kvm->lock);
774 break;
775 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100776 ret = -ENXIO;
777 if (!sclp.has_cmma)
778 break;
Dominik Dingelc3489152015-06-18 13:17:11 +0200779 ret = -EINVAL;
780 if (!kvm->arch.use_cmma)
781 break;
782
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200783 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200784 mutex_lock(&kvm->lock);
785 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200786 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200787 srcu_read_unlock(&kvm->srcu, idx);
788 mutex_unlock(&kvm->lock);
789 ret = 0;
790 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100791 case KVM_S390_VM_MEM_LIMIT_SIZE: {
792 unsigned long new_limit;
793
794 if (kvm_is_ucontrol(kvm))
795 return -EINVAL;
796
797 if (get_user(new_limit, (u64 __user *)attr->addr))
798 return -EFAULT;
799
Dominik Dingela3a92c32014-12-01 17:24:42 +0100800 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
801 new_limit > kvm->arch.mem_limit)
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100802 return -E2BIG;
803
Dominik Dingela3a92c32014-12-01 17:24:42 +0100804 if (!new_limit)
805 return -EINVAL;
806
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100807 /* gmap_create takes last usable address */
Dominik Dingela3a92c32014-12-01 17:24:42 +0100808 if (new_limit != KVM_S390_NO_MEM_LIMIT)
809 new_limit -= 1;
810
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100811 ret = -EBUSY;
812 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200813 if (!kvm->created_vcpus) {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100814 /* gmap_create will round the limit up */
815 struct gmap *new = gmap_create(current->mm, new_limit);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100816
817 if (!new) {
818 ret = -ENOMEM;
819 } else {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100820 gmap_remove(kvm->arch.gmap);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100821 new->private = kvm;
822 kvm->arch.gmap = new;
823 ret = 0;
824 }
825 }
826 mutex_unlock(&kvm->lock);
Dominik Dingela3a92c32014-12-01 17:24:42 +0100827 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
828 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
829 (void *) kvm->arch.gmap->asce);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100830 break;
831 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200832 default:
833 ret = -ENXIO;
834 break;
835 }
836 return ret;
837}
838
Tony Krowiaka374e892014-09-03 10:13:53 +0200839static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
840
Tony Krowiak20c922f2018-04-22 11:37:03 -0400841void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
Tony Krowiaka374e892014-09-03 10:13:53 +0200842{
843 struct kvm_vcpu *vcpu;
844 int i;
845
Tony Krowiak20c922f2018-04-22 11:37:03 -0400846 kvm_s390_vcpu_block_all(kvm);
847
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400848 kvm_for_each_vcpu(i, vcpu, kvm) {
Tony Krowiak20c922f2018-04-22 11:37:03 -0400849 kvm_s390_vcpu_crypto_setup(vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400850 /* recreate the shadow crycb by leaving the VSIE handler */
851 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
852 }
Tony Krowiak20c922f2018-04-22 11:37:03 -0400853
854 kvm_s390_vcpu_unblock_all(kvm);
855}
856
857static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
858{
Tony Krowiaka374e892014-09-03 10:13:53 +0200859 mutex_lock(&kvm->lock);
860 switch (attr->attr) {
861 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
Tony Krowiak37940fb2018-09-25 19:16:39 -0400862 if (!test_kvm_facility(kvm, 76))
863 return -EINVAL;
Tony Krowiaka374e892014-09-03 10:13:53 +0200864 get_random_bytes(
865 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
866 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
867 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200868 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200869 break;
870 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
Tony Krowiak37940fb2018-09-25 19:16:39 -0400871 if (!test_kvm_facility(kvm, 76))
872 return -EINVAL;
Tony Krowiaka374e892014-09-03 10:13:53 +0200873 get_random_bytes(
874 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
875 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
876 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200877 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200878 break;
879 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
Tony Krowiak37940fb2018-09-25 19:16:39 -0400880 if (!test_kvm_facility(kvm, 76))
881 return -EINVAL;
Tony Krowiaka374e892014-09-03 10:13:53 +0200882 kvm->arch.crypto.aes_kw = 0;
883 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
884 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200885 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200886 break;
887 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
Tony Krowiak37940fb2018-09-25 19:16:39 -0400888 if (!test_kvm_facility(kvm, 76))
889 return -EINVAL;
Tony Krowiaka374e892014-09-03 10:13:53 +0200890 kvm->arch.crypto.dea_kw = 0;
891 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
892 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200893 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200894 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -0400895 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
896 if (!ap_instructions_available()) {
897 mutex_unlock(&kvm->lock);
898 return -EOPNOTSUPP;
899 }
900 kvm->arch.crypto.apie = 1;
901 break;
902 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
903 if (!ap_instructions_available()) {
904 mutex_unlock(&kvm->lock);
905 return -EOPNOTSUPP;
906 }
907 kvm->arch.crypto.apie = 0;
908 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200909 default:
910 mutex_unlock(&kvm->lock);
911 return -ENXIO;
912 }
913
Tony Krowiak20c922f2018-04-22 11:37:03 -0400914 kvm_s390_vcpu_crypto_reset_all(kvm);
Tony Krowiaka374e892014-09-03 10:13:53 +0200915 mutex_unlock(&kvm->lock);
916 return 0;
917}
918
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200919static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
920{
921 int cx;
922 struct kvm_vcpu *vcpu;
923
924 kvm_for_each_vcpu(cx, vcpu, kvm)
925 kvm_s390_sync_request(req, vcpu);
926}
927
928/*
929 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100930 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200931 */
932static int kvm_s390_vm_start_migration(struct kvm *kvm)
933{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200934 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200935 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200936 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200937 int slotnr;
938
939 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200940 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200941 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200942 slots = kvm_memslots(kvm);
943 if (!slots || !slots->used_slots)
944 return -EINVAL;
945
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200946 if (!kvm->arch.use_cmma) {
947 kvm->arch.migration_mode = 1;
948 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200949 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200950 /* mark all the pages in active slots as dirty */
951 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
952 ms = slots->memslots + slotnr;
953 /*
954 * The second half of the bitmap is only used on x86,
955 * and would be wasted otherwise, so we put it to good
956 * use here to keep track of the state of the storage
957 * attributes.
958 */
959 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
960 ram_pages += ms->npages;
961 }
962 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
963 kvm->arch.migration_mode = 1;
964 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200965 return 0;
966}
967
968/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100969 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200970 * kvm_s390_vm_start_migration.
971 */
972static int kvm_s390_vm_stop_migration(struct kvm *kvm)
973{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200974 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200975 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200976 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200977 kvm->arch.migration_mode = 0;
978 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200979 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200980 return 0;
981}
982
983static int kvm_s390_vm_set_migration(struct kvm *kvm,
984 struct kvm_device_attr *attr)
985{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100986 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200987
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100988 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200989 switch (attr->attr) {
990 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200991 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200992 break;
993 case KVM_S390_VM_MIGRATION_STOP:
994 res = kvm_s390_vm_stop_migration(kvm);
995 break;
996 default:
997 break;
998 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100999 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001000
1001 return res;
1002}
1003
1004static int kvm_s390_vm_get_migration(struct kvm *kvm,
1005 struct kvm_device_attr *attr)
1006{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001007 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001008
1009 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1010 return -ENXIO;
1011
1012 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1013 return -EFAULT;
1014 return 0;
1015}
1016
Collin L. Walling8fa16962016-07-26 15:29:44 -04001017static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1018{
1019 struct kvm_s390_vm_tod_clock gtod;
1020
1021 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1022 return -EFAULT;
1023
David Hildenbrand0e7def52018-02-07 12:46:43 +01001024 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001025 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001026 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001027
1028 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1029 gtod.epoch_idx, gtod.tod);
1030
1031 return 0;
1032}
1033
Jason J. Herne72f25022014-11-25 09:46:02 -05001034static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1035{
1036 u8 gtod_high;
1037
1038 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1039 sizeof(gtod_high)))
1040 return -EFAULT;
1041
1042 if (gtod_high != 0)
1043 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001044 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001045
1046 return 0;
1047}
1048
1049static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1050{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001051 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001052
David Hildenbrand0e7def52018-02-07 12:46:43 +01001053 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1054 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001055 return -EFAULT;
1056
David Hildenbrand0e7def52018-02-07 12:46:43 +01001057 kvm_s390_set_tod_clock(kvm, &gtod);
1058 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001059 return 0;
1060}
1061
1062static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1063{
1064 int ret;
1065
1066 if (attr->flags)
1067 return -EINVAL;
1068
1069 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001070 case KVM_S390_VM_TOD_EXT:
1071 ret = kvm_s390_set_tod_ext(kvm, attr);
1072 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001073 case KVM_S390_VM_TOD_HIGH:
1074 ret = kvm_s390_set_tod_high(kvm, attr);
1075 break;
1076 case KVM_S390_VM_TOD_LOW:
1077 ret = kvm_s390_set_tod_low(kvm, attr);
1078 break;
1079 default:
1080 ret = -ENXIO;
1081 break;
1082 }
1083 return ret;
1084}
1085
David Hildenbrand33d1b272018-04-27 14:36:13 +02001086static void kvm_s390_get_tod_clock(struct kvm *kvm,
1087 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001088{
1089 struct kvm_s390_tod_clock_ext htod;
1090
1091 preempt_disable();
1092
1093 get_tod_clock_ext((char *)&htod);
1094
1095 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001096 gtod->epoch_idx = 0;
1097 if (test_kvm_facility(kvm, 139)) {
1098 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1099 if (gtod->tod < htod.tod)
1100 gtod->epoch_idx += 1;
1101 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001102
1103 preempt_enable();
1104}
1105
1106static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1107{
1108 struct kvm_s390_vm_tod_clock gtod;
1109
1110 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001111 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001112 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1113 return -EFAULT;
1114
1115 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1116 gtod.epoch_idx, gtod.tod);
1117 return 0;
1118}
1119
Jason J. Herne72f25022014-11-25 09:46:02 -05001120static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1121{
1122 u8 gtod_high = 0;
1123
1124 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1125 sizeof(gtod_high)))
1126 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001127 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001128
1129 return 0;
1130}
1131
1132static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1133{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001134 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001135
David Hildenbrand60417fc2015-09-29 16:20:36 +02001136 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001137 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1138 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001139 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001140
1141 return 0;
1142}
1143
1144static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1145{
1146 int ret;
1147
1148 if (attr->flags)
1149 return -EINVAL;
1150
1151 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001152 case KVM_S390_VM_TOD_EXT:
1153 ret = kvm_s390_get_tod_ext(kvm, attr);
1154 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001155 case KVM_S390_VM_TOD_HIGH:
1156 ret = kvm_s390_get_tod_high(kvm, attr);
1157 break;
1158 case KVM_S390_VM_TOD_LOW:
1159 ret = kvm_s390_get_tod_low(kvm, attr);
1160 break;
1161 default:
1162 ret = -ENXIO;
1163 break;
1164 }
1165 return ret;
1166}
1167
Michael Mueller658b6ed2015-02-02 15:49:35 +01001168static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1169{
1170 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001171 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001172 int ret = 0;
1173
1174 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001175 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001176 ret = -EBUSY;
1177 goto out;
1178 }
1179 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1180 if (!proc) {
1181 ret = -ENOMEM;
1182 goto out;
1183 }
1184 if (!copy_from_user(proc, (void __user *)attr->addr,
1185 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001186 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001187 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1188 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001189 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001190 if (proc->ibc > unblocked_ibc)
1191 kvm->arch.model.ibc = unblocked_ibc;
1192 else if (proc->ibc < lowest_ibc)
1193 kvm->arch.model.ibc = lowest_ibc;
1194 else
1195 kvm->arch.model.ibc = proc->ibc;
1196 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001197 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001198 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001199 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1200 kvm->arch.model.ibc,
1201 kvm->arch.model.cpuid);
1202 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1203 kvm->arch.model.fac_list[0],
1204 kvm->arch.model.fac_list[1],
1205 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001206 } else
1207 ret = -EFAULT;
1208 kfree(proc);
1209out:
1210 mutex_unlock(&kvm->lock);
1211 return ret;
1212}
1213
David Hildenbrand15c97052015-03-19 17:36:43 +01001214static int kvm_s390_set_processor_feat(struct kvm *kvm,
1215 struct kvm_device_attr *attr)
1216{
1217 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001218
1219 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1220 return -EFAULT;
1221 if (!bitmap_subset((unsigned long *) data.feat,
1222 kvm_s390_available_cpu_feat,
1223 KVM_S390_VM_CPU_FEAT_NR_BITS))
1224 return -EINVAL;
1225
1226 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001227 if (kvm->created_vcpus) {
1228 mutex_unlock(&kvm->lock);
1229 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001230 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001231 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1232 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001233 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001234 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1235 data.feat[0],
1236 data.feat[1],
1237 data.feat[2]);
1238 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001239}
1240
David Hildenbrand0a763c72016-05-18 16:03:47 +02001241static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1242 struct kvm_device_attr *attr)
1243{
1244 /*
1245 * Once supported by kernel + hw, we have to store the subfunctions
1246 * in kvm->arch and remember that user space configured them.
1247 */
1248 return -ENXIO;
1249}
1250
Michael Mueller658b6ed2015-02-02 15:49:35 +01001251static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1252{
1253 int ret = -ENXIO;
1254
1255 switch (attr->attr) {
1256 case KVM_S390_VM_CPU_PROCESSOR:
1257 ret = kvm_s390_set_processor(kvm, attr);
1258 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001259 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1260 ret = kvm_s390_set_processor_feat(kvm, attr);
1261 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001262 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1263 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1264 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001265 }
1266 return ret;
1267}
1268
1269static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1270{
1271 struct kvm_s390_vm_cpu_processor *proc;
1272 int ret = 0;
1273
1274 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1275 if (!proc) {
1276 ret = -ENOMEM;
1277 goto out;
1278 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001279 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001280 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001281 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1282 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001283 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1284 kvm->arch.model.ibc,
1285 kvm->arch.model.cpuid);
1286 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1287 kvm->arch.model.fac_list[0],
1288 kvm->arch.model.fac_list[1],
1289 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001290 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1291 ret = -EFAULT;
1292 kfree(proc);
1293out:
1294 return ret;
1295}
1296
1297static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1298{
1299 struct kvm_s390_vm_cpu_machine *mach;
1300 int ret = 0;
1301
1302 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1303 if (!mach) {
1304 ret = -ENOMEM;
1305 goto out;
1306 }
1307 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001308 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001309 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001310 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001311 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001312 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001313 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1314 kvm->arch.model.ibc,
1315 kvm->arch.model.cpuid);
1316 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1317 mach->fac_mask[0],
1318 mach->fac_mask[1],
1319 mach->fac_mask[2]);
1320 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1321 mach->fac_list[0],
1322 mach->fac_list[1],
1323 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001324 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1325 ret = -EFAULT;
1326 kfree(mach);
1327out:
1328 return ret;
1329}
1330
David Hildenbrand15c97052015-03-19 17:36:43 +01001331static int kvm_s390_get_processor_feat(struct kvm *kvm,
1332 struct kvm_device_attr *attr)
1333{
1334 struct kvm_s390_vm_cpu_feat data;
1335
1336 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1337 KVM_S390_VM_CPU_FEAT_NR_BITS);
1338 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1339 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001340 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1341 data.feat[0],
1342 data.feat[1],
1343 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001344 return 0;
1345}
1346
1347static int kvm_s390_get_machine_feat(struct kvm *kvm,
1348 struct kvm_device_attr *attr)
1349{
1350 struct kvm_s390_vm_cpu_feat data;
1351
1352 bitmap_copy((unsigned long *) data.feat,
1353 kvm_s390_available_cpu_feat,
1354 KVM_S390_VM_CPU_FEAT_NR_BITS);
1355 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1356 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001357 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1358 data.feat[0],
1359 data.feat[1],
1360 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001361 return 0;
1362}
1363
David Hildenbrand0a763c72016-05-18 16:03:47 +02001364static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1365 struct kvm_device_attr *attr)
1366{
1367 /*
1368 * Once we can actually configure subfunctions (kernel + hw support),
1369 * we have to check if they were already set by user space, if so copy
1370 * them from kvm->arch.
1371 */
1372 return -ENXIO;
1373}
1374
1375static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1376 struct kvm_device_attr *attr)
1377{
1378 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1379 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1380 return -EFAULT;
1381 return 0;
1382}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001383static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1384{
1385 int ret = -ENXIO;
1386
1387 switch (attr->attr) {
1388 case KVM_S390_VM_CPU_PROCESSOR:
1389 ret = kvm_s390_get_processor(kvm, attr);
1390 break;
1391 case KVM_S390_VM_CPU_MACHINE:
1392 ret = kvm_s390_get_machine(kvm, attr);
1393 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001394 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1395 ret = kvm_s390_get_processor_feat(kvm, attr);
1396 break;
1397 case KVM_S390_VM_CPU_MACHINE_FEAT:
1398 ret = kvm_s390_get_machine_feat(kvm, attr);
1399 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001400 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1401 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1402 break;
1403 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1404 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1405 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001406 }
1407 return ret;
1408}
1409
Dominik Dingelf2061652014-04-09 13:13:00 +02001410static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1411{
1412 int ret;
1413
1414 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001415 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001416 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001417 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001418 case KVM_S390_VM_TOD:
1419 ret = kvm_s390_set_tod(kvm, attr);
1420 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001421 case KVM_S390_VM_CPU_MODEL:
1422 ret = kvm_s390_set_cpu_model(kvm, attr);
1423 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001424 case KVM_S390_VM_CRYPTO:
1425 ret = kvm_s390_vm_set_crypto(kvm, attr);
1426 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001427 case KVM_S390_VM_MIGRATION:
1428 ret = kvm_s390_vm_set_migration(kvm, attr);
1429 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001430 default:
1431 ret = -ENXIO;
1432 break;
1433 }
1434
1435 return ret;
1436}
1437
1438static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1439{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001440 int ret;
1441
1442 switch (attr->group) {
1443 case KVM_S390_VM_MEM_CTRL:
1444 ret = kvm_s390_get_mem_control(kvm, attr);
1445 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001446 case KVM_S390_VM_TOD:
1447 ret = kvm_s390_get_tod(kvm, attr);
1448 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001449 case KVM_S390_VM_CPU_MODEL:
1450 ret = kvm_s390_get_cpu_model(kvm, attr);
1451 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001452 case KVM_S390_VM_MIGRATION:
1453 ret = kvm_s390_vm_get_migration(kvm, attr);
1454 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001455 default:
1456 ret = -ENXIO;
1457 break;
1458 }
1459
1460 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001461}
1462
1463static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1464{
1465 int ret;
1466
1467 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001468 case KVM_S390_VM_MEM_CTRL:
1469 switch (attr->attr) {
1470 case KVM_S390_VM_MEM_ENABLE_CMMA:
1471 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001472 ret = sclp.has_cmma ? 0 : -ENXIO;
1473 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001474 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001475 ret = 0;
1476 break;
1477 default:
1478 ret = -ENXIO;
1479 break;
1480 }
1481 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001482 case KVM_S390_VM_TOD:
1483 switch (attr->attr) {
1484 case KVM_S390_VM_TOD_LOW:
1485 case KVM_S390_VM_TOD_HIGH:
1486 ret = 0;
1487 break;
1488 default:
1489 ret = -ENXIO;
1490 break;
1491 }
1492 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001493 case KVM_S390_VM_CPU_MODEL:
1494 switch (attr->attr) {
1495 case KVM_S390_VM_CPU_PROCESSOR:
1496 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001497 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1498 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001499 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001500 ret = 0;
1501 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001502 /* configuring subfunctions is not supported yet */
1503 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001504 default:
1505 ret = -ENXIO;
1506 break;
1507 }
1508 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001509 case KVM_S390_VM_CRYPTO:
1510 switch (attr->attr) {
1511 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1512 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1513 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1514 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1515 ret = 0;
1516 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001517 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1518 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1519 ret = ap_instructions_available() ? 0 : -ENXIO;
1520 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001521 default:
1522 ret = -ENXIO;
1523 break;
1524 }
1525 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001526 case KVM_S390_VM_MIGRATION:
1527 ret = 0;
1528 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001529 default:
1530 ret = -ENXIO;
1531 break;
1532 }
1533
1534 return ret;
1535}
1536
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001537static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1538{
1539 uint8_t *keys;
1540 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001541 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001542
1543 if (args->flags != 0)
1544 return -EINVAL;
1545
1546 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001547 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001548 return KVM_S390_GET_SKEYS_NONE;
1549
1550 /* Enforce sane limit on memory allocation */
1551 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1552 return -EINVAL;
1553
Michal Hocko752ade62017-05-08 15:57:27 -07001554 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001555 if (!keys)
1556 return -ENOMEM;
1557
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001558 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001559 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001560 for (i = 0; i < args->count; i++) {
1561 hva = gfn_to_hva(kvm, args->start_gfn + i);
1562 if (kvm_is_error_hva(hva)) {
1563 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001564 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001565 }
1566
David Hildenbrand154c8c12016-05-09 11:22:34 +02001567 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1568 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001569 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001570 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001571 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001572 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001573
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001574 if (!r) {
1575 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1576 sizeof(uint8_t) * args->count);
1577 if (r)
1578 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001579 }
1580
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001581 kvfree(keys);
1582 return r;
1583}
1584
1585static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1586{
1587 uint8_t *keys;
1588 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001589 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001590 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001591
1592 if (args->flags != 0)
1593 return -EINVAL;
1594
1595 /* Enforce sane limit on memory allocation */
1596 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1597 return -EINVAL;
1598
Michal Hocko752ade62017-05-08 15:57:27 -07001599 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001600 if (!keys)
1601 return -ENOMEM;
1602
1603 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1604 sizeof(uint8_t) * args->count);
1605 if (r) {
1606 r = -EFAULT;
1607 goto out;
1608 }
1609
1610 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001611 r = s390_enable_skey();
1612 if (r)
1613 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001614
Janosch Frankbd096f62018-07-18 13:40:22 +01001615 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001616 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001617 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001618 while (i < args->count) {
1619 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001620 hva = gfn_to_hva(kvm, args->start_gfn + i);
1621 if (kvm_is_error_hva(hva)) {
1622 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001623 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001624 }
1625
1626 /* Lowest order bit is reserved */
1627 if (keys[i] & 0x01) {
1628 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001629 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001630 }
1631
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001632 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001633 if (r) {
1634 r = fixup_user_fault(current, current->mm, hva,
1635 FAULT_FLAG_WRITE, &unlocked);
1636 if (r)
1637 break;
1638 }
1639 if (!r)
1640 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001641 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001642 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001643 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001644out:
1645 kvfree(keys);
1646 return r;
1647}
1648
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001649/*
1650 * Base address and length must be sent at the start of each block, therefore
1651 * it's cheaper to send some clean data, as long as it's less than the size of
1652 * two longs.
1653 */
1654#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1655/* for consistency */
1656#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1657
1658/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001659 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1660 * address falls in a hole. In that case the index of one of the memslots
1661 * bordering the hole is returned.
1662 */
1663static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1664{
1665 int start = 0, end = slots->used_slots;
1666 int slot = atomic_read(&slots->lru_slot);
1667 struct kvm_memory_slot *memslots = slots->memslots;
1668
1669 if (gfn >= memslots[slot].base_gfn &&
1670 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1671 return slot;
1672
1673 while (start < end) {
1674 slot = start + (end - start) / 2;
1675
1676 if (gfn >= memslots[slot].base_gfn)
1677 end = slot;
1678 else
1679 start = slot + 1;
1680 }
1681
1682 if (gfn >= memslots[start].base_gfn &&
1683 gfn < memslots[start].base_gfn + memslots[start].npages) {
1684 atomic_set(&slots->lru_slot, start);
1685 }
1686
1687 return start;
1688}
1689
1690static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1691 u8 *res, unsigned long bufsize)
1692{
1693 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1694
1695 args->count = 0;
1696 while (args->count < bufsize) {
1697 hva = gfn_to_hva(kvm, cur_gfn);
1698 /*
1699 * We return an error if the first value was invalid, but we
1700 * return successfully if at least one value was copied.
1701 */
1702 if (kvm_is_error_hva(hva))
1703 return args->count ? 0 : -EFAULT;
1704 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1705 pgstev = 0;
1706 res[args->count++] = (pgstev >> 24) & 0x43;
1707 cur_gfn++;
1708 }
1709
1710 return 0;
1711}
1712
1713static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1714 unsigned long cur_gfn)
1715{
1716 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1717 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1718 unsigned long ofs = cur_gfn - ms->base_gfn;
1719
1720 if (ms->base_gfn + ms->npages <= cur_gfn) {
1721 slotidx--;
1722 /* If we are above the highest slot, wrap around */
1723 if (slotidx < 0)
1724 slotidx = slots->used_slots - 1;
1725
1726 ms = slots->memslots + slotidx;
1727 ofs = 0;
1728 }
1729 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1730 while ((slotidx > 0) && (ofs >= ms->npages)) {
1731 slotidx--;
1732 ms = slots->memslots + slotidx;
1733 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1734 }
1735 return ms->base_gfn + ofs;
1736}
1737
1738static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1739 u8 *res, unsigned long bufsize)
1740{
1741 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1742 struct kvm_memslots *slots = kvm_memslots(kvm);
1743 struct kvm_memory_slot *ms;
1744
1745 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1746 ms = gfn_to_memslot(kvm, cur_gfn);
1747 args->count = 0;
1748 args->start_gfn = cur_gfn;
1749 if (!ms)
1750 return 0;
1751 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1752 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
1753
1754 while (args->count < bufsize) {
1755 hva = gfn_to_hva(kvm, cur_gfn);
1756 if (kvm_is_error_hva(hva))
1757 return 0;
1758 /* Decrement only if we actually flipped the bit to 0 */
1759 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
1760 atomic64_dec(&kvm->arch.cmma_dirty_pages);
1761 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1762 pgstev = 0;
1763 /* Save the value */
1764 res[args->count++] = (pgstev >> 24) & 0x43;
1765 /* If the next bit is too far away, stop. */
1766 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
1767 return 0;
1768 /* If we reached the previous "next", find the next one */
1769 if (cur_gfn == next_gfn)
1770 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1771 /* Reached the end of memory or of the buffer, stop */
1772 if ((next_gfn >= mem_end) ||
1773 (next_gfn - args->start_gfn >= bufsize))
1774 return 0;
1775 cur_gfn++;
1776 /* Reached the end of the current memslot, take the next one. */
1777 if (cur_gfn - ms->base_gfn >= ms->npages) {
1778 ms = gfn_to_memslot(kvm, cur_gfn);
1779 if (!ms)
1780 return 0;
1781 }
1782 }
1783 return 0;
1784}
1785
1786/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001787 * This function searches for the next page with dirty CMMA attributes, and
1788 * saves the attributes in the buffer up to either the end of the buffer or
1789 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1790 * no trailing clean bytes are saved.
1791 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1792 * output buffer will indicate 0 as length.
1793 */
1794static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1795 struct kvm_s390_cmma_log *args)
1796{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001797 unsigned long bufsize;
1798 int srcu_idx, peek, ret;
1799 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001800
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001801 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001802 return -ENXIO;
1803 /* Invalid/unsupported flags were specified */
1804 if (args->flags & ~KVM_S390_CMMA_PEEK)
1805 return -EINVAL;
1806 /* Migration mode query, and we are not doing a migration */
1807 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001808 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001809 return -EINVAL;
1810 /* CMMA is disabled or was not used, or the buffer has length zero */
1811 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001812 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001813 memset(args, 0, sizeof(*args));
1814 return 0;
1815 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001816 /* We are not peeking, and there are no dirty pages */
1817 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
1818 memset(args, 0, sizeof(*args));
1819 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001820 }
1821
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001822 values = vmalloc(bufsize);
1823 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001824 return -ENOMEM;
1825
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001826 down_read(&kvm->mm->mmap_sem);
1827 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001828 if (peek)
1829 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
1830 else
1831 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001832 srcu_read_unlock(&kvm->srcu, srcu_idx);
1833 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001834
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001835 if (kvm->arch.migration_mode)
1836 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
1837 else
1838 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001839
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001840 if (copy_to_user((void __user *)args->values, values, args->count))
1841 ret = -EFAULT;
1842
1843 vfree(values);
1844 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001845}
1846
1847/*
1848 * This function sets the CMMA attributes for the given pages. If the input
1849 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001850 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001851 */
1852static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1853 const struct kvm_s390_cmma_log *args)
1854{
1855 unsigned long hva, mask, pgstev, i;
1856 uint8_t *bits;
1857 int srcu_idx, r = 0;
1858
1859 mask = args->mask;
1860
1861 if (!kvm->arch.use_cmma)
1862 return -ENXIO;
1863 /* invalid/unsupported flags */
1864 if (args->flags != 0)
1865 return -EINVAL;
1866 /* Enforce sane limit on memory allocation */
1867 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1868 return -EINVAL;
1869 /* Nothing to do */
1870 if (args->count == 0)
1871 return 0;
1872
Kees Cook42bc47b2018-06-12 14:27:11 -07001873 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001874 if (!bits)
1875 return -ENOMEM;
1876
1877 r = copy_from_user(bits, (void __user *)args->values, args->count);
1878 if (r) {
1879 r = -EFAULT;
1880 goto out;
1881 }
1882
1883 down_read(&kvm->mm->mmap_sem);
1884 srcu_idx = srcu_read_lock(&kvm->srcu);
1885 for (i = 0; i < args->count; i++) {
1886 hva = gfn_to_hva(kvm, args->start_gfn + i);
1887 if (kvm_is_error_hva(hva)) {
1888 r = -EFAULT;
1889 break;
1890 }
1891
1892 pgstev = bits[i];
1893 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02001894 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001895 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1896 }
1897 srcu_read_unlock(&kvm->srcu, srcu_idx);
1898 up_read(&kvm->mm->mmap_sem);
1899
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001900 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001901 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01001902 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001903 up_write(&kvm->mm->mmap_sem);
1904 }
1905out:
1906 vfree(bits);
1907 return r;
1908}
1909
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001910long kvm_arch_vm_ioctl(struct file *filp,
1911 unsigned int ioctl, unsigned long arg)
1912{
1913 struct kvm *kvm = filp->private_data;
1914 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001915 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001916 int r;
1917
1918 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001919 case KVM_S390_INTERRUPT: {
1920 struct kvm_s390_interrupt s390int;
1921
1922 r = -EFAULT;
1923 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1924 break;
1925 r = kvm_s390_inject_vm(kvm, &s390int);
1926 break;
1927 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001928 case KVM_ENABLE_CAP: {
1929 struct kvm_enable_cap cap;
1930 r = -EFAULT;
1931 if (copy_from_user(&cap, argp, sizeof(cap)))
1932 break;
1933 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1934 break;
1935 }
Cornelia Huck84223592013-07-15 13:36:01 +02001936 case KVM_CREATE_IRQCHIP: {
1937 struct kvm_irq_routing_entry routing;
1938
1939 r = -EINVAL;
1940 if (kvm->arch.use_irqchip) {
1941 /* Set up dummy routing. */
1942 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001943 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001944 }
1945 break;
1946 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001947 case KVM_SET_DEVICE_ATTR: {
1948 r = -EFAULT;
1949 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1950 break;
1951 r = kvm_s390_vm_set_attr(kvm, &attr);
1952 break;
1953 }
1954 case KVM_GET_DEVICE_ATTR: {
1955 r = -EFAULT;
1956 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1957 break;
1958 r = kvm_s390_vm_get_attr(kvm, &attr);
1959 break;
1960 }
1961 case KVM_HAS_DEVICE_ATTR: {
1962 r = -EFAULT;
1963 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1964 break;
1965 r = kvm_s390_vm_has_attr(kvm, &attr);
1966 break;
1967 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001968 case KVM_S390_GET_SKEYS: {
1969 struct kvm_s390_skeys args;
1970
1971 r = -EFAULT;
1972 if (copy_from_user(&args, argp,
1973 sizeof(struct kvm_s390_skeys)))
1974 break;
1975 r = kvm_s390_get_skeys(kvm, &args);
1976 break;
1977 }
1978 case KVM_S390_SET_SKEYS: {
1979 struct kvm_s390_skeys args;
1980
1981 r = -EFAULT;
1982 if (copy_from_user(&args, argp,
1983 sizeof(struct kvm_s390_skeys)))
1984 break;
1985 r = kvm_s390_set_skeys(kvm, &args);
1986 break;
1987 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001988 case KVM_S390_GET_CMMA_BITS: {
1989 struct kvm_s390_cmma_log args;
1990
1991 r = -EFAULT;
1992 if (copy_from_user(&args, argp, sizeof(args)))
1993 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001994 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001995 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001996 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001997 if (!r) {
1998 r = copy_to_user(argp, &args, sizeof(args));
1999 if (r)
2000 r = -EFAULT;
2001 }
2002 break;
2003 }
2004 case KVM_S390_SET_CMMA_BITS: {
2005 struct kvm_s390_cmma_log args;
2006
2007 r = -EFAULT;
2008 if (copy_from_user(&args, argp, sizeof(args)))
2009 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002010 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002011 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002012 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002013 break;
2014 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002015 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002016 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002017 }
2018
2019 return r;
2020}
2021
Tony Krowiak45c9b472015-01-13 11:33:26 -05002022static int kvm_s390_apxa_installed(void)
2023{
Tony Krowiake585b242018-09-25 19:16:18 -04002024 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002025
Tony Krowiake585b242018-09-25 19:16:18 -04002026 if (ap_instructions_available()) {
2027 if (ap_qci(&info) == 0)
2028 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002029 }
2030
2031 return 0;
2032}
2033
Tony Krowiake585b242018-09-25 19:16:18 -04002034/*
2035 * The format of the crypto control block (CRYCB) is specified in the 3 low
2036 * order bits of the CRYCB designation (CRYCBD) field as follows:
2037 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2038 * AP extended addressing (APXA) facility are installed.
2039 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2040 * Format 2: Both the APXA and MSAX3 facilities are installed
2041 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002042static void kvm_s390_set_crycb_format(struct kvm *kvm)
2043{
2044 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2045
Tony Krowiake585b242018-09-25 19:16:18 -04002046 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2047 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2048
2049 /* Check whether MSAX3 is installed */
2050 if (!test_kvm_facility(kvm, 76))
2051 return;
2052
Tony Krowiak45c9b472015-01-13 11:33:26 -05002053 if (kvm_s390_apxa_installed())
2054 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2055 else
2056 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2057}
2058
Tony Krowiak421045982018-09-25 19:16:25 -04002059void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2060{
2061 mutex_lock(&kvm->lock);
2062 kvm_s390_vcpu_block_all(kvm);
2063
2064 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2065 sizeof(kvm->arch.crypto.crycb->apcb0));
2066 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2067 sizeof(kvm->arch.crypto.crycb->apcb1));
2068
Pierre Morel6cc571b2018-09-25 19:16:30 -04002069 /* recreate the shadow crycb for each vcpu */
2070 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002071 kvm_s390_vcpu_unblock_all(kvm);
2072 mutex_unlock(&kvm->lock);
2073}
2074EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2075
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002076static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002077{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002078 struct cpuid cpuid;
2079
2080 get_cpu_id(&cpuid);
2081 cpuid.version = 0xff;
2082 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002083}
2084
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002085static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002086{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002087 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002088 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002089
Tony Krowiake585b242018-09-25 19:16:18 -04002090 if (!test_kvm_facility(kvm, 76))
2091 return;
2092
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002093 /* Enable AES/DEA protected key functions by default */
2094 kvm->arch.crypto.aes_kw = 1;
2095 kvm->arch.crypto.dea_kw = 1;
2096 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2097 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2098 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2099 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002100}
2101
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002102static void sca_dispose(struct kvm *kvm)
2103{
2104 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002105 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002106 else
2107 free_page((unsigned long)(kvm->arch.sca));
2108 kvm->arch.sca = NULL;
2109}
2110
Carsten Ottee08b9632012-01-04 10:25:20 +01002111int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002112{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002113 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002114 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002115 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002116 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002117
Carsten Ottee08b9632012-01-04 10:25:20 +01002118 rc = -EINVAL;
2119#ifdef CONFIG_KVM_S390_UCONTROL
2120 if (type & ~KVM_VM_S390_UCONTROL)
2121 goto out_err;
2122 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2123 goto out_err;
2124#else
2125 if (type)
2126 goto out_err;
2127#endif
2128
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002129 rc = s390_enable_sie();
2130 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002131 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002132
Carsten Otteb2904112011-10-18 12:27:13 +02002133 rc = -ENOMEM;
2134
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002135 if (!sclp.has_64bscao)
2136 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002137 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002138 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002139 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002140 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002141 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002142 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002143 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002144 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002145 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002146 kvm->arch.sca = (struct bsca_block *)
2147 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002148 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002149
2150 sprintf(debug_name, "kvm-%u", current->pid);
2151
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002152 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002153 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002154 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002155
Michael Mueller19114be2017-05-30 14:26:02 +02002156 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002157 kvm->arch.sie_page2 =
2158 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2159 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002160 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002161
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002162 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002163
2164 for (i = 0; i < kvm_s390_fac_size(); i++) {
2165 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2166 (kvm_s390_fac_base[i] |
2167 kvm_s390_fac_ext[i]);
2168 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2169 kvm_s390_fac_base[i];
2170 }
Michael Mueller981467c2015-02-24 13:51:04 +01002171
David Hildenbrand19352222017-08-29 16:31:08 +02002172 /* we are always in czam mode - even on pre z14 machines */
2173 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2174 set_kvm_facility(kvm->arch.model.fac_list, 138);
2175 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002176 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2177 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002178 if (MACHINE_HAS_TLB_GUEST) {
2179 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2180 set_kvm_facility(kvm->arch.model.fac_list, 147);
2181 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002182
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002183 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002184 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002185
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002186 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002187
Fei Li51978392017-02-17 17:06:26 +08002188 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002189 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002190 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2191 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002192 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002193 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002194
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002195 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002196 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002197
Carsten Ottee08b9632012-01-04 10:25:20 +01002198 if (type & KVM_VM_S390_UCONTROL) {
2199 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002200 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002201 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002202 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002203 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002204 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002205 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002206 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002207 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002208 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002209 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002210 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002211 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002212 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002213
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002214 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002215 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002216 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002217 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002218 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002219 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002220
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002221 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002222out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002223 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002224 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002225 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002226 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002227 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002228}
2229
Luiz Capitulino235539b2016-09-07 14:47:23 -04002230bool kvm_arch_has_vcpu_debugfs(void)
2231{
2232 return false;
2233}
2234
2235int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2236{
2237 return 0;
2238}
2239
Christian Borntraegerd329c032008-11-26 14:50:27 +01002240void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2241{
2242 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002243 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002244 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002245 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002246 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002247 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002248
2249 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002250 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002251
Dominik Dingele6db1d62015-05-07 15:41:57 +02002252 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002253 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002254 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002255
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002256 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002257 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002258}
2259
2260static void kvm_free_vcpus(struct kvm *kvm)
2261{
2262 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002263 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002264
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002265 kvm_for_each_vcpu(i, vcpu, kvm)
2266 kvm_arch_vcpu_destroy(vcpu);
2267
2268 mutex_lock(&kvm->lock);
2269 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2270 kvm->vcpus[i] = NULL;
2271
2272 atomic_set(&kvm->online_vcpus, 0);
2273 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002274}
2275
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002276void kvm_arch_destroy_vm(struct kvm *kvm)
2277{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002278 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002279 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002280 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002281 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002282 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002283 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002284 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002285 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002286 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002287 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002288 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002289}
2290
2291/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002292static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2293{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002294 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002295 if (!vcpu->arch.gmap)
2296 return -ENOMEM;
2297 vcpu->arch.gmap->private = vcpu->kvm;
2298
2299 return 0;
2300}
2301
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002302static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2303{
David Hildenbranda6940672016-08-08 22:39:32 +02002304 if (!kvm_s390_use_sca_entries())
2305 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002306 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002307 if (vcpu->kvm->arch.use_esca) {
2308 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002309
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002310 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002311 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002312 } else {
2313 struct bsca_block *sca = vcpu->kvm->arch.sca;
2314
2315 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002316 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002317 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002318 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002319}
2320
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002321static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002322{
David Hildenbranda6940672016-08-08 22:39:32 +02002323 if (!kvm_s390_use_sca_entries()) {
2324 struct bsca_block *sca = vcpu->kvm->arch.sca;
2325
2326 /* we still need the basic sca for the ipte control */
2327 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2328 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002329 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002330 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002331 read_lock(&vcpu->kvm->arch.sca_lock);
2332 if (vcpu->kvm->arch.use_esca) {
2333 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002334
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002335 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002336 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2337 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002338 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002339 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002340 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002341 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002342
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002343 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002344 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2345 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002346 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002347 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002348 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002349}
2350
2351/* Basic SCA to Extended SCA data copy routines */
2352static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2353{
2354 d->sda = s->sda;
2355 d->sigp_ctrl.c = s->sigp_ctrl.c;
2356 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2357}
2358
2359static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2360{
2361 int i;
2362
2363 d->ipte_control = s->ipte_control;
2364 d->mcn[0] = s->mcn;
2365 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2366 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2367}
2368
2369static int sca_switch_to_extended(struct kvm *kvm)
2370{
2371 struct bsca_block *old_sca = kvm->arch.sca;
2372 struct esca_block *new_sca;
2373 struct kvm_vcpu *vcpu;
2374 unsigned int vcpu_idx;
2375 u32 scaol, scaoh;
2376
2377 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2378 if (!new_sca)
2379 return -ENOMEM;
2380
2381 scaoh = (u32)((u64)(new_sca) >> 32);
2382 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2383
2384 kvm_s390_vcpu_block_all(kvm);
2385 write_lock(&kvm->arch.sca_lock);
2386
2387 sca_copy_b_to_e(new_sca, old_sca);
2388
2389 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2390 vcpu->arch.sie_block->scaoh = scaoh;
2391 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002392 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002393 }
2394 kvm->arch.sca = new_sca;
2395 kvm->arch.use_esca = 1;
2396
2397 write_unlock(&kvm->arch.sca_lock);
2398 kvm_s390_vcpu_unblock_all(kvm);
2399
2400 free_page((unsigned long)old_sca);
2401
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002402 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2403 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002404 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002405}
2406
2407static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2408{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002409 int rc;
2410
David Hildenbranda6940672016-08-08 22:39:32 +02002411 if (!kvm_s390_use_sca_entries()) {
2412 if (id < KVM_MAX_VCPUS)
2413 return true;
2414 return false;
2415 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002416 if (id < KVM_S390_BSCA_CPU_SLOTS)
2417 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002418 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002419 return false;
2420
2421 mutex_lock(&kvm->lock);
2422 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2423 mutex_unlock(&kvm->lock);
2424
2425 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002426}
2427
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002428int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2429{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002430 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2431 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002432 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2433 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002434 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002435 KVM_SYNC_CRS |
2436 KVM_SYNC_ARCH0 |
2437 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002438 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002439 if (test_kvm_facility(vcpu->kvm, 64))
2440 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002441 if (test_kvm_facility(vcpu->kvm, 82))
2442 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002443 if (test_kvm_facility(vcpu->kvm, 133))
2444 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002445 if (test_kvm_facility(vcpu->kvm, 156))
2446 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002447 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2448 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2449 */
2450 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002451 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002452 else
2453 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002454
2455 if (kvm_is_ucontrol(vcpu->kvm))
2456 return __kvm_ucontrol_vcpu_init(vcpu);
2457
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002458 return 0;
2459}
2460
David Hildenbranddb0758b2016-02-15 09:42:25 +01002461/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2462static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2463{
2464 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002465 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002466 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002467 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002468}
2469
2470/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2471static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2472{
2473 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002474 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002475 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2476 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002477 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002478}
2479
2480/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2481static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2482{
2483 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2484 vcpu->arch.cputm_enabled = true;
2485 __start_cpu_timer_accounting(vcpu);
2486}
2487
2488/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2489static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2490{
2491 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2492 __stop_cpu_timer_accounting(vcpu);
2493 vcpu->arch.cputm_enabled = false;
2494}
2495
2496static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2497{
2498 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2499 __enable_cpu_timer_accounting(vcpu);
2500 preempt_enable();
2501}
2502
2503static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2504{
2505 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2506 __disable_cpu_timer_accounting(vcpu);
2507 preempt_enable();
2508}
2509
David Hildenbrand4287f242016-02-15 09:40:12 +01002510/* set the cpu timer - may only be called from the VCPU thread itself */
2511void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2512{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002513 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002514 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002515 if (vcpu->arch.cputm_enabled)
2516 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002517 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002518 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002519 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002520}
2521
David Hildenbranddb0758b2016-02-15 09:42:25 +01002522/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002523__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2524{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002525 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002526 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002527
2528 if (unlikely(!vcpu->arch.cputm_enabled))
2529 return vcpu->arch.sie_block->cputm;
2530
David Hildenbrand9c23a132016-02-17 21:53:33 +01002531 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2532 do {
2533 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2534 /*
2535 * If the writer would ever execute a read in the critical
2536 * section, e.g. in irq context, we have a deadlock.
2537 */
2538 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2539 value = vcpu->arch.sie_block->cputm;
2540 /* if cputm_start is 0, accounting is being started/stopped */
2541 if (likely(vcpu->arch.cputm_start))
2542 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2543 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2544 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002545 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002546}
2547
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002548void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2549{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002550
David Hildenbrand37d9df92015-03-11 16:47:33 +01002551 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002552 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002553 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002554 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002555 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002556}
2557
2558void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2559{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002560 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002561 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002562 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002563 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002564 vcpu->arch.enabled_gmap = gmap_get_enabled();
2565 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002566
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002567}
2568
2569static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2570{
2571 /* this equals initial cpu reset in pop, but we don't switch to ESA */
2572 vcpu->arch.sie_block->gpsw.mask = 0UL;
2573 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002574 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002575 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002576 vcpu->arch.sie_block->ckc = 0UL;
2577 vcpu->arch.sie_block->todpr = 0;
2578 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002579 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2580 CR0_INTERRUPT_KEY_SUBMASK |
2581 CR0_MEASUREMENT_ALERT_SUBMASK;
2582 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2583 CR14_UNUSED_33 |
2584 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002585 /* make sure the new fpc will be lazily loaded */
2586 save_fpu_regs();
2587 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002588 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002589 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002590 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002591 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2592 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002593 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2594 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002595 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002596}
2597
Dominik Dingel31928aa2014-12-04 15:47:07 +01002598void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002599{
Jason J. Herne72f25022014-11-25 09:46:02 -05002600 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002601 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002602 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002603 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002604 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002605 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002606 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002607 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002608 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002609 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002610 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2611 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002612 /* make vcpu_load load the right gmap on the first trigger */
2613 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002614}
2615
Tony Krowiak5102ee82014-06-27 14:46:01 -04002616static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2617{
Tony Krowiake585b242018-09-25 19:16:18 -04002618 /*
2619 * If the AP instructions are not being interpreted and the MSAX3
2620 * facility is not configured for the guest, there is nothing to set up.
2621 */
2622 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002623 return;
2624
Tony Krowiake585b242018-09-25 19:16:18 -04002625 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002626 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002627 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Tony Krowiaka374e892014-09-03 10:13:53 +02002628
Tony Krowiake585b242018-09-25 19:16:18 -04002629 if (vcpu->kvm->arch.crypto.apie)
2630 vcpu->arch.sie_block->eca |= ECA_APIE;
2631
2632 /* Set up protected key support */
Tony Krowiaka374e892014-09-03 10:13:53 +02002633 if (vcpu->kvm->arch.crypto.aes_kw)
2634 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2635 if (vcpu->kvm->arch.crypto.dea_kw)
2636 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002637}
2638
Dominik Dingelb31605c2014-03-25 13:47:11 +01002639void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2640{
2641 free_page(vcpu->arch.sie_block->cbrlo);
2642 vcpu->arch.sie_block->cbrlo = 0;
2643}
2644
2645int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2646{
2647 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2648 if (!vcpu->arch.sie_block->cbrlo)
2649 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002650 return 0;
2651}
2652
Michael Mueller91520f12015-02-27 14:32:11 +01002653static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2654{
2655 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2656
Michael Mueller91520f12015-02-27 14:32:11 +01002657 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002658 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002659 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002660}
2661
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002662int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2663{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002664 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002665
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002666 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2667 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002668 CPUSTAT_STOPPED);
2669
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002670 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002671 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002672 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002673 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002674
Michael Mueller91520f12015-02-27 14:32:11 +01002675 kvm_s390_vcpu_setup_model(vcpu);
2676
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002677 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2678 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002679 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002680 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002681 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002682 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002683 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002684
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002685 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002686 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002687 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002688 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2689 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002690 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002691 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002692 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002693 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002694 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002695 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002696 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002697 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002698 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002699 vcpu->arch.sie_block->eca |= ECA_VX;
2700 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002701 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002702 if (test_kvm_facility(vcpu->kvm, 139))
2703 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002704 if (test_kvm_facility(vcpu->kvm, 156))
2705 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002706 if (vcpu->arch.sie_block->gd) {
2707 vcpu->arch.sie_block->eca |= ECA_AIV;
2708 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2709 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2710 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002711 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2712 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002713 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002714
2715 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002716 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05002717 else
2718 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002719
Dominik Dingele6db1d62015-05-07 15:41:57 +02002720 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002721 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2722 if (rc)
2723 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002724 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002725 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002726 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002727
Tony Krowiak5102ee82014-06-27 14:46:01 -04002728 kvm_s390_vcpu_crypto_setup(vcpu);
2729
Dominik Dingelb31605c2014-03-25 13:47:11 +01002730 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002731}
2732
2733struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2734 unsigned int id)
2735{
Carsten Otte4d475552011-10-18 12:27:12 +02002736 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002737 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002738 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002739
David Hildenbrand42158252015-10-12 12:57:22 +02002740 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002741 goto out;
2742
2743 rc = -ENOMEM;
2744
Michael Muellerb110fea2013-06-12 13:54:54 +02002745 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002746 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002747 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002748
QingFeng Haoda72ca42017-06-07 11:41:19 +02002749 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002750 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2751 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002752 goto out_free_cpu;
2753
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002754 vcpu->arch.sie_block = &sie_page->sie_block;
2755 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2756
David Hildenbrandefed1102015-04-16 12:32:41 +02002757 /* the real guest size will always be smaller than msl */
2758 vcpu->arch.sie_block->mso = 0;
2759 vcpu->arch.sie_block->msl = sclp.hamax;
2760
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002761 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002762 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002763 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
Michael Mueller4b9f9522017-06-23 13:51:25 +02002764 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2765 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002766 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002767
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002768 rc = kvm_vcpu_init(vcpu, kvm, id);
2769 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002770 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002771 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002772 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002773 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002774
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002775 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002776out_free_sie_block:
2777 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002778out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002779 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002780out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002781 return ERR_PTR(rc);
2782}
2783
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002784int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2785{
David Hildenbrand9a022062014-08-05 17:40:47 +02002786 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002787}
2788
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002789bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2790{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002791 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002792}
2793
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002794void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002795{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002796 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002797 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002798}
2799
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002800void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002801{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002802 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002803}
2804
Christian Borntraeger8e236542015-04-09 13:49:04 +02002805static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2806{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002807 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002808 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002809}
2810
David Hildenbrand9ea59722018-09-25 19:16:16 -04002811bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
2812{
2813 return atomic_read(&vcpu->arch.sie_block->prog20) &
2814 (PROG_BLOCK_SIE | PROG_REQUEST);
2815}
2816
Christian Borntraeger8e236542015-04-09 13:49:04 +02002817static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2818{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002819 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002820}
2821
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002822/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04002823 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002824 * If the CPU is not running (e.g. waiting as idle) the function will
2825 * return immediately. */
2826void exit_sie(struct kvm_vcpu *vcpu)
2827{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002828 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04002829 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002830 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2831 cpu_relax();
2832}
2833
Christian Borntraeger8e236542015-04-09 13:49:04 +02002834/* Kick a guest cpu out of SIE to process a request synchronously */
2835void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002836{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002837 kvm_make_request(req, vcpu);
2838 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002839}
2840
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002841static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2842 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002843{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002844 struct kvm *kvm = gmap->private;
2845 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002846 unsigned long prefix;
2847 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002848
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002849 if (gmap_is_shadow(gmap))
2850 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002851 if (start >= 1UL << 31)
2852 /* We are only interested in prefix pages */
2853 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002854 kvm_for_each_vcpu(i, vcpu, kvm) {
2855 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002856 prefix = kvm_s390_get_prefix(vcpu);
2857 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2858 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2859 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002860 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002861 }
2862 }
2863}
2864
Christoffer Dallb6d33832012-03-08 16:44:24 -05002865int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2866{
2867 /* kvm common code refers to this, but never calls it */
2868 BUG();
2869 return 0;
2870}
2871
Carsten Otte14eebd92012-05-15 14:15:26 +02002872static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2873 struct kvm_one_reg *reg)
2874{
2875 int r = -EINVAL;
2876
2877 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002878 case KVM_REG_S390_TODPR:
2879 r = put_user(vcpu->arch.sie_block->todpr,
2880 (u32 __user *)reg->addr);
2881 break;
2882 case KVM_REG_S390_EPOCHDIFF:
2883 r = put_user(vcpu->arch.sie_block->epoch,
2884 (u64 __user *)reg->addr);
2885 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002886 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002887 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002888 (u64 __user *)reg->addr);
2889 break;
2890 case KVM_REG_S390_CLOCK_COMP:
2891 r = put_user(vcpu->arch.sie_block->ckc,
2892 (u64 __user *)reg->addr);
2893 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002894 case KVM_REG_S390_PFTOKEN:
2895 r = put_user(vcpu->arch.pfault_token,
2896 (u64 __user *)reg->addr);
2897 break;
2898 case KVM_REG_S390_PFCOMPARE:
2899 r = put_user(vcpu->arch.pfault_compare,
2900 (u64 __user *)reg->addr);
2901 break;
2902 case KVM_REG_S390_PFSELECT:
2903 r = put_user(vcpu->arch.pfault_select,
2904 (u64 __user *)reg->addr);
2905 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002906 case KVM_REG_S390_PP:
2907 r = put_user(vcpu->arch.sie_block->pp,
2908 (u64 __user *)reg->addr);
2909 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002910 case KVM_REG_S390_GBEA:
2911 r = put_user(vcpu->arch.sie_block->gbea,
2912 (u64 __user *)reg->addr);
2913 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002914 default:
2915 break;
2916 }
2917
2918 return r;
2919}
2920
2921static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2922 struct kvm_one_reg *reg)
2923{
2924 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002925 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002926
2927 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002928 case KVM_REG_S390_TODPR:
2929 r = get_user(vcpu->arch.sie_block->todpr,
2930 (u32 __user *)reg->addr);
2931 break;
2932 case KVM_REG_S390_EPOCHDIFF:
2933 r = get_user(vcpu->arch.sie_block->epoch,
2934 (u64 __user *)reg->addr);
2935 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002936 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002937 r = get_user(val, (u64 __user *)reg->addr);
2938 if (!r)
2939 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002940 break;
2941 case KVM_REG_S390_CLOCK_COMP:
2942 r = get_user(vcpu->arch.sie_block->ckc,
2943 (u64 __user *)reg->addr);
2944 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002945 case KVM_REG_S390_PFTOKEN:
2946 r = get_user(vcpu->arch.pfault_token,
2947 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002948 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2949 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002950 break;
2951 case KVM_REG_S390_PFCOMPARE:
2952 r = get_user(vcpu->arch.pfault_compare,
2953 (u64 __user *)reg->addr);
2954 break;
2955 case KVM_REG_S390_PFSELECT:
2956 r = get_user(vcpu->arch.pfault_select,
2957 (u64 __user *)reg->addr);
2958 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002959 case KVM_REG_S390_PP:
2960 r = get_user(vcpu->arch.sie_block->pp,
2961 (u64 __user *)reg->addr);
2962 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002963 case KVM_REG_S390_GBEA:
2964 r = get_user(vcpu->arch.sie_block->gbea,
2965 (u64 __user *)reg->addr);
2966 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002967 default:
2968 break;
2969 }
2970
2971 return r;
2972}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002973
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002974static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2975{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002976 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002977 return 0;
2978}
2979
2980int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2981{
Christoffer Dall875656f2017-12-04 21:35:27 +01002982 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002983 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01002984 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002985 return 0;
2986}
2987
2988int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2989{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002990 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002991 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01002992 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002993 return 0;
2994}
2995
2996int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2997 struct kvm_sregs *sregs)
2998{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01002999 vcpu_load(vcpu);
3000
Christian Borntraeger59674c12012-01-11 11:20:33 +01003001 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003002 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003003
3004 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003005 return 0;
3006}
3007
3008int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3009 struct kvm_sregs *sregs)
3010{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003011 vcpu_load(vcpu);
3012
Christian Borntraeger59674c12012-01-11 11:20:33 +01003013 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003014 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003015
3016 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003017 return 0;
3018}
3019
3020int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3021{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003022 int ret = 0;
3023
3024 vcpu_load(vcpu);
3025
3026 if (test_fp_ctl(fpu->fpc)) {
3027 ret = -EINVAL;
3028 goto out;
3029 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003030 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003031 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003032 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3033 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003034 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003035 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003036
3037out:
3038 vcpu_put(vcpu);
3039 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003040}
3041
3042int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3043{
Christoffer Dall13931232017-12-04 21:35:34 +01003044 vcpu_load(vcpu);
3045
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003046 /* make sure we have the latest values */
3047 save_fpu_regs();
3048 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003049 convert_vx_to_fp((freg_t *) fpu->fprs,
3050 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003051 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003052 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003053 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003054
3055 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003056 return 0;
3057}
3058
3059static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3060{
3061 int rc = 0;
3062
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003063 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003064 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003065 else {
3066 vcpu->run->psw_mask = psw.mask;
3067 vcpu->run->psw_addr = psw.addr;
3068 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003069 return rc;
3070}
3071
3072int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3073 struct kvm_translation *tr)
3074{
3075 return -EINVAL; /* not implemented yet */
3076}
3077
David Hildenbrand27291e22014-01-23 12:26:52 +01003078#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3079 KVM_GUESTDBG_USE_HW_BP | \
3080 KVM_GUESTDBG_ENABLE)
3081
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003082int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3083 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003084{
David Hildenbrand27291e22014-01-23 12:26:52 +01003085 int rc = 0;
3086
Christoffer Dall66b56562017-12-04 21:35:33 +01003087 vcpu_load(vcpu);
3088
David Hildenbrand27291e22014-01-23 12:26:52 +01003089 vcpu->guest_debug = 0;
3090 kvm_s390_clear_bp_data(vcpu);
3091
Christoffer Dall66b56562017-12-04 21:35:33 +01003092 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3093 rc = -EINVAL;
3094 goto out;
3095 }
3096 if (!sclp.has_gpere) {
3097 rc = -EINVAL;
3098 goto out;
3099 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003100
3101 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3102 vcpu->guest_debug = dbg->control;
3103 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003104 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003105
3106 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3107 rc = kvm_s390_import_bp_data(vcpu, dbg);
3108 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003109 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003110 vcpu->arch.guestdbg.last_bp = 0;
3111 }
3112
3113 if (rc) {
3114 vcpu->guest_debug = 0;
3115 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003116 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003117 }
3118
Christoffer Dall66b56562017-12-04 21:35:33 +01003119out:
3120 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003121 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003122}
3123
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003124int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3125 struct kvm_mp_state *mp_state)
3126{
Christoffer Dallfd232562017-12-04 21:35:30 +01003127 int ret;
3128
3129 vcpu_load(vcpu);
3130
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003131 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003132 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3133 KVM_MP_STATE_OPERATING;
3134
3135 vcpu_put(vcpu);
3136 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003137}
3138
3139int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3140 struct kvm_mp_state *mp_state)
3141{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003142 int rc = 0;
3143
Christoffer Dalle83dff52017-12-04 21:35:31 +01003144 vcpu_load(vcpu);
3145
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003146 /* user space knows about this interface - let it control the state */
3147 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3148
3149 switch (mp_state->mp_state) {
3150 case KVM_MP_STATE_STOPPED:
3151 kvm_s390_vcpu_stop(vcpu);
3152 break;
3153 case KVM_MP_STATE_OPERATING:
3154 kvm_s390_vcpu_start(vcpu);
3155 break;
3156 case KVM_MP_STATE_LOAD:
3157 case KVM_MP_STATE_CHECK_STOP:
3158 /* fall through - CHECK_STOP and LOAD are not supported yet */
3159 default:
3160 rc = -ENXIO;
3161 }
3162
Christoffer Dalle83dff52017-12-04 21:35:31 +01003163 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003164 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003165}
3166
David Hildenbrand8ad35752014-03-14 11:00:21 +01003167static bool ibs_enabled(struct kvm_vcpu *vcpu)
3168{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003169 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003170}
3171
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003172static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3173{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003174retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003175 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003176 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003177 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003178 /*
3179 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003180 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003181 * This ensures that the ipte instruction for this request has
3182 * already finished. We might race against a second unmapper that
3183 * wants to set the blocking bit. Lets just retry the request loop.
3184 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003185 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003186 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003187 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3188 kvm_s390_get_prefix(vcpu),
3189 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003190 if (rc) {
3191 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003192 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003193 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003194 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003195 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003196
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003197 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3198 vcpu->arch.sie_block->ihcpu = 0xffff;
3199 goto retry;
3200 }
3201
David Hildenbrand8ad35752014-03-14 11:00:21 +01003202 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3203 if (!ibs_enabled(vcpu)) {
3204 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003205 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003206 }
3207 goto retry;
3208 }
3209
3210 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3211 if (ibs_enabled(vcpu)) {
3212 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003213 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003214 }
3215 goto retry;
3216 }
3217
David Hildenbrand6502a342016-06-21 14:19:51 +02003218 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3219 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3220 goto retry;
3221 }
3222
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003223 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3224 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003225 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003226 * instruction manually, in order to provide additional
3227 * functionalities needed for live migration.
3228 */
3229 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3230 goto retry;
3231 }
3232
3233 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3234 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003235 * Re-enable CMM virtualization if CMMA is available and
3236 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003237 */
3238 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003239 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003240 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3241 goto retry;
3242 }
3243
David Hildenbrand0759d062014-05-13 16:54:32 +02003244 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003245 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003246 /* we left the vsie handler, nothing to do, just clear the request */
3247 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003248
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003249 return 0;
3250}
3251
David Hildenbrand0e7def52018-02-07 12:46:43 +01003252void kvm_s390_set_tod_clock(struct kvm *kvm,
3253 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003254{
3255 struct kvm_vcpu *vcpu;
3256 struct kvm_s390_tod_clock_ext htod;
3257 int i;
3258
3259 mutex_lock(&kvm->lock);
3260 preempt_disable();
3261
3262 get_tod_clock_ext((char *)&htod);
3263
3264 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003265 kvm->arch.epdx = 0;
3266 if (test_kvm_facility(kvm, 139)) {
3267 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3268 if (kvm->arch.epoch > gtod->tod)
3269 kvm->arch.epdx -= 1;
3270 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003271
3272 kvm_s390_vcpu_block_all(kvm);
3273 kvm_for_each_vcpu(i, vcpu, kvm) {
3274 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3275 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3276 }
3277
3278 kvm_s390_vcpu_unblock_all(kvm);
3279 preempt_enable();
3280 mutex_unlock(&kvm->lock);
3281}
3282
Thomas Huthfa576c52014-05-06 17:20:16 +02003283/**
3284 * kvm_arch_fault_in_page - fault-in guest page if necessary
3285 * @vcpu: The corresponding virtual cpu
3286 * @gpa: Guest physical address
3287 * @writable: Whether the page should be writable or not
3288 *
3289 * Make sure that a guest page has been faulted-in on the host.
3290 *
3291 * Return: Zero on success, negative error code otherwise.
3292 */
3293long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003294{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003295 return gmap_fault(vcpu->arch.gmap, gpa,
3296 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003297}
3298
Dominik Dingel3c038e62013-10-07 17:11:48 +02003299static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3300 unsigned long token)
3301{
3302 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003303 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003304
3305 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003306 irq.u.ext.ext_params2 = token;
3307 irq.type = KVM_S390_INT_PFAULT_INIT;
3308 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003309 } else {
3310 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003311 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003312 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3313 }
3314}
3315
3316void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3317 struct kvm_async_pf *work)
3318{
3319 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3320 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3321}
3322
3323void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3324 struct kvm_async_pf *work)
3325{
3326 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3327 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3328}
3329
3330void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3331 struct kvm_async_pf *work)
3332{
3333 /* s390 will always inject the page directly */
3334}
3335
3336bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3337{
3338 /*
3339 * s390 will always inject the page directly,
3340 * but we still want check_async_completion to cleanup
3341 */
3342 return true;
3343}
3344
3345static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3346{
3347 hva_t hva;
3348 struct kvm_arch_async_pf arch;
3349 int rc;
3350
3351 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3352 return 0;
3353 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3354 vcpu->arch.pfault_compare)
3355 return 0;
3356 if (psw_extint_disabled(vcpu))
3357 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003358 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003359 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003360 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003361 return 0;
3362 if (!vcpu->arch.gmap->pfault_enabled)
3363 return 0;
3364
Heiko Carstens81480cc2014-01-01 16:36:07 +01003365 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3366 hva += current->thread.gmap_addr & ~PAGE_MASK;
3367 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003368 return 0;
3369
3370 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3371 return rc;
3372}
3373
Thomas Huth3fb4c402013-09-12 10:33:43 +02003374static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003375{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003376 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003377
Dominik Dingel3c038e62013-10-07 17:11:48 +02003378 /*
3379 * On s390 notifications for arriving pages will be delivered directly
3380 * to the guest but the house keeping for completed pfaults is
3381 * handled outside the worker.
3382 */
3383 kvm_check_async_pf_completion(vcpu);
3384
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003385 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3386 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003387
3388 if (need_resched())
3389 schedule();
3390
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003391 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003392 s390_handle_mcck();
3393
Jens Freimann79395032014-04-17 10:10:30 +02003394 if (!kvm_is_ucontrol(vcpu->kvm)) {
3395 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3396 if (rc)
3397 return rc;
3398 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003399
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003400 rc = kvm_s390_handle_requests(vcpu);
3401 if (rc)
3402 return rc;
3403
David Hildenbrand27291e22014-01-23 12:26:52 +01003404 if (guestdbg_enabled(vcpu)) {
3405 kvm_s390_backup_guest_per_regs(vcpu);
3406 kvm_s390_patch_guest_per_regs(vcpu);
3407 }
3408
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003409 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003410 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3411 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3412 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003413
Thomas Huth3fb4c402013-09-12 10:33:43 +02003414 return 0;
3415}
3416
Thomas Huth492d8642015-02-10 16:11:01 +01003417static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3418{
David Hildenbrand56317922016-01-12 17:37:58 +01003419 struct kvm_s390_pgm_info pgm_info = {
3420 .code = PGM_ADDRESSING,
3421 };
3422 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003423 int rc;
3424
3425 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3426 trace_kvm_s390_sie_fault(vcpu);
3427
3428 /*
3429 * We want to inject an addressing exception, which is defined as a
3430 * suppressing or terminating exception. However, since we came here
3431 * by a DAT access exception, the PSW still points to the faulting
3432 * instruction since DAT exceptions are nullifying. So we've got
3433 * to look up the current opcode to get the length of the instruction
3434 * to be able to forward the PSW.
3435 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003436 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003437 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003438 if (rc < 0) {
3439 return rc;
3440 } else if (rc) {
3441 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3442 * Forward by arbitrary ilc, injection will take care of
3443 * nullification if necessary.
3444 */
3445 pgm_info = vcpu->arch.pgm;
3446 ilen = 4;
3447 }
David Hildenbrand56317922016-01-12 17:37:58 +01003448 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3449 kvm_s390_forward_psw(vcpu, ilen);
3450 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003451}
3452
Thomas Huth3fb4c402013-09-12 10:33:43 +02003453static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3454{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003455 struct mcck_volatile_info *mcck_info;
3456 struct sie_page *sie_page;
3457
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003458 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3459 vcpu->arch.sie_block->icptcode);
3460 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3461
David Hildenbrand27291e22014-01-23 12:26:52 +01003462 if (guestdbg_enabled(vcpu))
3463 kvm_s390_restore_guest_per_regs(vcpu);
3464
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003465 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3466 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003467
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003468 if (exit_reason == -EINTR) {
3469 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3470 sie_page = container_of(vcpu->arch.sie_block,
3471 struct sie_page, sie_block);
3472 mcck_info = &sie_page->mcck_info;
3473 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3474 return 0;
3475 }
3476
David Hildenbrand71f116b2015-10-19 16:24:28 +02003477 if (vcpu->arch.sie_block->icptcode > 0) {
3478 int rc = kvm_handle_sie_intercept(vcpu);
3479
3480 if (rc != -EOPNOTSUPP)
3481 return rc;
3482 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3483 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3484 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3485 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3486 return -EREMOTE;
3487 } else if (exit_reason != -EFAULT) {
3488 vcpu->stat.exit_null++;
3489 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003490 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3491 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3492 vcpu->run->s390_ucontrol.trans_exc_code =
3493 current->thread.gmap_addr;
3494 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003495 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003496 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003497 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003498 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003499 if (kvm_arch_setup_async_pf(vcpu))
3500 return 0;
3501 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003502 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003503 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003504}
3505
3506static int __vcpu_run(struct kvm_vcpu *vcpu)
3507{
3508 int rc, exit_reason;
3509
Thomas Huth800c1062013-09-12 10:33:45 +02003510 /*
3511 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3512 * ning the guest), so that memslots (and other stuff) are protected
3513 */
3514 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3515
Thomas Hutha76ccff2013-09-12 10:33:44 +02003516 do {
3517 rc = vcpu_pre_run(vcpu);
3518 if (rc)
3519 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003520
Thomas Huth800c1062013-09-12 10:33:45 +02003521 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003522 /*
3523 * As PF_VCPU will be used in fault handler, between
3524 * guest_enter and guest_exit should be no uaccess.
3525 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003526 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003527 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003528 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003529 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003530 exit_reason = sie64a(vcpu->arch.sie_block,
3531 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003532 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003533 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003534 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003535 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003536 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003537
Thomas Hutha76ccff2013-09-12 10:33:44 +02003538 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003539 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003540
Thomas Huth800c1062013-09-12 10:33:45 +02003541 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003542 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003543}
3544
David Hildenbrandb028ee32014-07-17 10:47:43 +02003545static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3546{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003547 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003548 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003549
3550 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003551 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003552 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3553 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3554 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3555 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3556 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3557 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003558 /* some control register changes require a tlb flush */
3559 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003560 }
3561 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003562 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003563 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3564 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3565 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3566 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3567 }
3568 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3569 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3570 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3571 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003572 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3573 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003574 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003575 /*
3576 * If userspace sets the riccb (e.g. after migration) to a valid state,
3577 * we should enable RI here instead of doing the lazy enablement.
3578 */
3579 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003580 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003581 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003582 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003583 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003584 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003585 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003586 /*
3587 * If userspace sets the gscb (e.g. after migration) to non-zero,
3588 * we should enable GS here instead of doing the lazy enablement.
3589 */
3590 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3591 test_kvm_facility(vcpu->kvm, 133) &&
3592 gscb->gssm &&
3593 !vcpu->arch.gs_enabled) {
3594 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3595 vcpu->arch.sie_block->ecb |= ECB_GS;
3596 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3597 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003598 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003599 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3600 test_kvm_facility(vcpu->kvm, 82)) {
3601 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3602 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3603 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003604 save_access_regs(vcpu->arch.host_acrs);
3605 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003606 /* save host (userspace) fprs/vrs */
3607 save_fpu_regs();
3608 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3609 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3610 if (MACHINE_HAS_VX)
3611 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3612 else
3613 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3614 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3615 if (test_fp_ctl(current->thread.fpu.fpc))
3616 /* User space provided an invalid FPC, let's clear it */
3617 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003618 if (MACHINE_HAS_GS) {
3619 preempt_disable();
3620 __ctl_set_bit(2, 4);
3621 if (current->thread.gs_cb) {
3622 vcpu->arch.host_gscb = current->thread.gs_cb;
3623 save_gs_cb(vcpu->arch.host_gscb);
3624 }
3625 if (vcpu->arch.gs_enabled) {
3626 current->thread.gs_cb = (struct gs_cb *)
3627 &vcpu->run->s.regs.gscb;
3628 restore_gs_cb(current->thread.gs_cb);
3629 }
3630 preempt_enable();
3631 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003632 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003633
David Hildenbrandb028ee32014-07-17 10:47:43 +02003634 kvm_run->kvm_dirty_regs = 0;
3635}
3636
3637static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3638{
3639 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3640 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3641 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3642 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003643 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003644 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3645 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3646 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3647 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3648 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3649 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3650 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003651 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003652 save_access_regs(vcpu->run->s.regs.acrs);
3653 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003654 /* Save guest register state */
3655 save_fpu_regs();
3656 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3657 /* Restore will be done lazily at return */
3658 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3659 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003660 if (MACHINE_HAS_GS) {
3661 __ctl_set_bit(2, 4);
3662 if (vcpu->arch.gs_enabled)
3663 save_gs_cb(current->thread.gs_cb);
3664 preempt_disable();
3665 current->thread.gs_cb = vcpu->arch.host_gscb;
3666 restore_gs_cb(vcpu->arch.host_gscb);
3667 preempt_enable();
3668 if (!vcpu->arch.host_gscb)
3669 __ctl_clear_bit(2, 4);
3670 vcpu->arch.host_gscb = NULL;
3671 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003672 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003673}
3674
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003675int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3676{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003677 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003678
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003679 if (kvm_run->immediate_exit)
3680 return -EINTR;
3681
Christoffer Dallaccb7572017-12-04 21:35:25 +01003682 vcpu_load(vcpu);
3683
David Hildenbrand27291e22014-01-23 12:26:52 +01003684 if (guestdbg_exit_pending(vcpu)) {
3685 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003686 rc = 0;
3687 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01003688 }
3689
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003690 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003691
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003692 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3693 kvm_s390_vcpu_start(vcpu);
3694 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003695 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003696 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003697 rc = -EINVAL;
3698 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003699 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003700
David Hildenbrandb028ee32014-07-17 10:47:43 +02003701 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003702 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003703
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003704 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003705 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003706
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003707 if (signal_pending(current) && !rc) {
3708 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003709 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003710 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003711
David Hildenbrand27291e22014-01-23 12:26:52 +01003712 if (guestdbg_exit_pending(vcpu) && !rc) {
3713 kvm_s390_prepare_debug_exit(vcpu);
3714 rc = 0;
3715 }
3716
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003717 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003718 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003719 rc = 0;
3720 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003721
David Hildenbranddb0758b2016-02-15 09:42:25 +01003722 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003723 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003724
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003725 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003726
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003727 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01003728out:
3729 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003730 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003731}
3732
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003733/*
3734 * store status at address
3735 * we use have two special cases:
3736 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3737 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3738 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003739int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003740{
Carsten Otte092670c2011-07-24 10:48:22 +02003741 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003742 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003743 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003744 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003745 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003746
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003747 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003748 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3749 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003750 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003751 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003752 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3753 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003754 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003755 gpa = px;
3756 } else
3757 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003758
3759 /* manually convert vector registers if necessary */
3760 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003761 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003762 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3763 fprs, 128);
3764 } else {
3765 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003766 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003767 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003768 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003769 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003770 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003771 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003772 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003773 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003774 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003775 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003776 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003777 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003778 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003779 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003780 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003781 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003782 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003783 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003784 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003785 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003786 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003787 &vcpu->arch.sie_block->gcr, 128);
3788 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003789}
3790
Thomas Huthe8798922013-11-06 15:46:33 +01003791int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3792{
3793 /*
3794 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003795 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003796 * it into the save area
3797 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003798 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003799 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003800 save_access_regs(vcpu->run->s.regs.acrs);
3801
3802 return kvm_s390_store_status_unloaded(vcpu, addr);
3803}
3804
David Hildenbrand8ad35752014-03-14 11:00:21 +01003805static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3806{
3807 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003808 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003809}
3810
3811static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3812{
3813 unsigned int i;
3814 struct kvm_vcpu *vcpu;
3815
3816 kvm_for_each_vcpu(i, vcpu, kvm) {
3817 __disable_ibs_on_vcpu(vcpu);
3818 }
3819}
3820
3821static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3822{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003823 if (!sclp.has_ibs)
3824 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003825 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003826 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003827}
3828
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003829void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3830{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003831 int i, online_vcpus, started_vcpus = 0;
3832
3833 if (!is_vcpu_stopped(vcpu))
3834 return;
3835
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003836 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003837 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003838 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003839 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3840
3841 for (i = 0; i < online_vcpus; i++) {
3842 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3843 started_vcpus++;
3844 }
3845
3846 if (started_vcpus == 0) {
3847 /* we're the only active VCPU -> speed it up */
3848 __enable_ibs_on_vcpu(vcpu);
3849 } else if (started_vcpus == 1) {
3850 /*
3851 * As we are starting a second VCPU, we have to disable
3852 * the IBS facility on all VCPUs to remove potentially
3853 * oustanding ENABLE requests.
3854 */
3855 __disable_ibs_on_all_vcpus(vcpu->kvm);
3856 }
3857
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003858 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003859 /*
3860 * Another VCPU might have used IBS while we were offline.
3861 * Let's play safe and flush the VCPU at startup.
3862 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003863 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003864 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003865 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003866}
3867
3868void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3869{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003870 int i, online_vcpus, started_vcpus = 0;
3871 struct kvm_vcpu *started_vcpu = NULL;
3872
3873 if (is_vcpu_stopped(vcpu))
3874 return;
3875
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003876 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003877 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003878 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003879 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3880
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003881 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003882 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003883
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003884 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003885 __disable_ibs_on_vcpu(vcpu);
3886
3887 for (i = 0; i < online_vcpus; i++) {
3888 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3889 started_vcpus++;
3890 started_vcpu = vcpu->kvm->vcpus[i];
3891 }
3892 }
3893
3894 if (started_vcpus == 1) {
3895 /*
3896 * As we only have one VCPU left, we want to enable the
3897 * IBS facility for that VCPU to speed it up.
3898 */
3899 __enable_ibs_on_vcpu(started_vcpu);
3900 }
3901
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003902 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003903 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003904}
3905
Cornelia Huckd6712df2012-12-20 15:32:11 +01003906static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3907 struct kvm_enable_cap *cap)
3908{
3909 int r;
3910
3911 if (cap->flags)
3912 return -EINVAL;
3913
3914 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003915 case KVM_CAP_S390_CSS_SUPPORT:
3916 if (!vcpu->kvm->arch.css_support) {
3917 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003918 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003919 trace_kvm_s390_enable_css(vcpu->kvm);
3920 }
3921 r = 0;
3922 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003923 default:
3924 r = -EINVAL;
3925 break;
3926 }
3927 return r;
3928}
3929
Thomas Huth41408c282015-02-06 15:01:21 +01003930static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3931 struct kvm_s390_mem_op *mop)
3932{
3933 void __user *uaddr = (void __user *)mop->buf;
3934 void *tmpbuf = NULL;
3935 int r, srcu_idx;
3936 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3937 | KVM_S390_MEMOP_F_CHECK_ONLY;
3938
3939 if (mop->flags & ~supported_flags)
3940 return -EINVAL;
3941
3942 if (mop->size > MEM_OP_MAX_SIZE)
3943 return -E2BIG;
3944
3945 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3946 tmpbuf = vmalloc(mop->size);
3947 if (!tmpbuf)
3948 return -ENOMEM;
3949 }
3950
3951 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3952
3953 switch (mop->op) {
3954 case KVM_S390_MEMOP_LOGICAL_READ:
3955 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003956 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3957 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003958 break;
3959 }
3960 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3961 if (r == 0) {
3962 if (copy_to_user(uaddr, tmpbuf, mop->size))
3963 r = -EFAULT;
3964 }
3965 break;
3966 case KVM_S390_MEMOP_LOGICAL_WRITE:
3967 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003968 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3969 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003970 break;
3971 }
3972 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3973 r = -EFAULT;
3974 break;
3975 }
3976 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3977 break;
3978 default:
3979 r = -EINVAL;
3980 }
3981
3982 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3983
3984 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3985 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3986
3987 vfree(tmpbuf);
3988 return r;
3989}
3990
Paolo Bonzini5cb09442017-12-12 17:41:34 +01003991long kvm_arch_vcpu_async_ioctl(struct file *filp,
3992 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003993{
3994 struct kvm_vcpu *vcpu = filp->private_data;
3995 void __user *argp = (void __user *)arg;
3996
Avi Kivity93736622010-05-13 12:35:17 +03003997 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003998 case KVM_S390_IRQ: {
3999 struct kvm_s390_irq s390irq;
4000
Jens Freimann47b43c52014-11-11 20:57:06 +01004001 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004002 return -EFAULT;
4003 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004004 }
Avi Kivity93736622010-05-13 12:35:17 +03004005 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004006 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02004007 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01004008
4009 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004010 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004011 if (s390int_to_s390irq(&s390int, &s390irq))
4012 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004013 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004014 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004015 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004016 return -ENOIOCTLCMD;
4017}
4018
4019long kvm_arch_vcpu_ioctl(struct file *filp,
4020 unsigned int ioctl, unsigned long arg)
4021{
4022 struct kvm_vcpu *vcpu = filp->private_data;
4023 void __user *argp = (void __user *)arg;
4024 int idx;
4025 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004026
4027 vcpu_load(vcpu);
4028
4029 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004030 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004031 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004032 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004033 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004034 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004035 case KVM_S390_SET_INITIAL_PSW: {
4036 psw_t psw;
4037
Avi Kivitybc923cc2010-05-13 12:21:46 +03004038 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004039 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004040 break;
4041 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4042 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004043 }
4044 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03004045 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4046 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004047 case KVM_SET_ONE_REG:
4048 case KVM_GET_ONE_REG: {
4049 struct kvm_one_reg reg;
4050 r = -EFAULT;
4051 if (copy_from_user(&reg, argp, sizeof(reg)))
4052 break;
4053 if (ioctl == KVM_SET_ONE_REG)
4054 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4055 else
4056 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4057 break;
4058 }
Carsten Otte27e03932012-01-04 10:25:21 +01004059#ifdef CONFIG_KVM_S390_UCONTROL
4060 case KVM_S390_UCAS_MAP: {
4061 struct kvm_s390_ucas_mapping ucasmap;
4062
4063 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4064 r = -EFAULT;
4065 break;
4066 }
4067
4068 if (!kvm_is_ucontrol(vcpu->kvm)) {
4069 r = -EINVAL;
4070 break;
4071 }
4072
4073 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4074 ucasmap.vcpu_addr, ucasmap.length);
4075 break;
4076 }
4077 case KVM_S390_UCAS_UNMAP: {
4078 struct kvm_s390_ucas_mapping ucasmap;
4079
4080 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4081 r = -EFAULT;
4082 break;
4083 }
4084
4085 if (!kvm_is_ucontrol(vcpu->kvm)) {
4086 r = -EINVAL;
4087 break;
4088 }
4089
4090 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4091 ucasmap.length);
4092 break;
4093 }
4094#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004095 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004096 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004097 break;
4098 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004099 case KVM_ENABLE_CAP:
4100 {
4101 struct kvm_enable_cap cap;
4102 r = -EFAULT;
4103 if (copy_from_user(&cap, argp, sizeof(cap)))
4104 break;
4105 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4106 break;
4107 }
Thomas Huth41408c282015-02-06 15:01:21 +01004108 case KVM_S390_MEM_OP: {
4109 struct kvm_s390_mem_op mem_op;
4110
4111 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4112 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4113 else
4114 r = -EFAULT;
4115 break;
4116 }
Jens Freimann816c7662014-11-24 17:13:46 +01004117 case KVM_S390_SET_IRQ_STATE: {
4118 struct kvm_s390_irq_state irq_state;
4119
4120 r = -EFAULT;
4121 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4122 break;
4123 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4124 irq_state.len == 0 ||
4125 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4126 r = -EINVAL;
4127 break;
4128 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004129 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004130 r = kvm_s390_set_irq_state(vcpu,
4131 (void __user *) irq_state.buf,
4132 irq_state.len);
4133 break;
4134 }
4135 case KVM_S390_GET_IRQ_STATE: {
4136 struct kvm_s390_irq_state irq_state;
4137
4138 r = -EFAULT;
4139 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4140 break;
4141 if (irq_state.len == 0) {
4142 r = -EINVAL;
4143 break;
4144 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004145 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004146 r = kvm_s390_get_irq_state(vcpu,
4147 (__u8 __user *) irq_state.buf,
4148 irq_state.len);
4149 break;
4150 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004151 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004152 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004153 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004154
4155 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004156 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004157}
4158
Souptick Joarder1499fa82018-04-19 00:49:58 +05304159vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004160{
4161#ifdef CONFIG_KVM_S390_UCONTROL
4162 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4163 && (kvm_is_ucontrol(vcpu->kvm))) {
4164 vmf->page = virt_to_page(vcpu->arch.sie_block);
4165 get_page(vmf->page);
4166 return 0;
4167 }
4168#endif
4169 return VM_FAULT_SIGBUS;
4170}
4171
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304172int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4173 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004174{
4175 return 0;
4176}
4177
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004178/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004179int kvm_arch_prepare_memory_region(struct kvm *kvm,
4180 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004181 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004182 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004183{
Nick Wangdd2887e2013-03-25 17:22:57 +01004184 /* A few sanity checks. We can have memory slots which have to be
4185 located/ended at a segment boundary (1MB). The memory in userland is
4186 ok to be fragmented into various different vmas. It is okay to mmap()
4187 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004188
Carsten Otte598841c2011-07-24 10:48:21 +02004189 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004190 return -EINVAL;
4191
Carsten Otte598841c2011-07-24 10:48:21 +02004192 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004193 return -EINVAL;
4194
Dominik Dingela3a92c32014-12-01 17:24:42 +01004195 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4196 return -EINVAL;
4197
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004198 return 0;
4199}
4200
4201void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004202 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004203 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004204 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004205 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004206{
Carsten Ottef7850c92011-07-24 10:48:23 +02004207 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004208
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01004209 /* If the basics of the memslot do not change, we do not want
4210 * to update the gmap. Every update causes several unnecessary
4211 * segment translation exceptions. This is usually handled just
4212 * fine by the normal fault handler + gmap, but it will also
4213 * cause faults on the prefix page of running guest CPUs.
4214 */
4215 if (old->userspace_addr == mem->userspace_addr &&
4216 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
4217 old->npages * PAGE_SIZE == mem->memory_size)
4218 return;
Carsten Otte598841c2011-07-24 10:48:21 +02004219
4220 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4221 mem->guest_phys_addr, mem->memory_size);
4222 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004223 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004224 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004225}
4226
Alexander Yarygin60a37702016-04-01 15:38:57 +03004227static inline unsigned long nonhyp_mask(int i)
4228{
4229 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4230
4231 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4232}
4233
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004234void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4235{
4236 vcpu->valid_wakeup = false;
4237}
4238
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004239static int __init kvm_s390_init(void)
4240{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004241 int i;
4242
David Hildenbrand07197fd2015-01-30 16:01:38 +01004243 if (!sclp.has_sief2) {
4244 pr_info("SIE not available\n");
4245 return -ENODEV;
4246 }
4247
Janosch Franka4499382018-07-13 11:28:31 +01004248 if (nested && hpage) {
4249 pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
4250 return -EINVAL;
4251 }
4252
Alexander Yarygin60a37702016-04-01 15:38:57 +03004253 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004254 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004255 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4256
Michael Mueller9d8d5782015-02-02 15:42:51 +01004257 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004258}
4259
4260static void __exit kvm_s390_exit(void)
4261{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004262 kvm_exit();
4263}
4264
4265module_init(kvm_s390_init);
4266module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004267
4268/*
4269 * Enable autoloading of the kvm module.
4270 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4271 * since x86 takes a different approach.
4272 */
4273#include <linux/miscdevice.h>
4274MODULE_ALIAS_MISCDEV(KVM_MINOR);
4275MODULE_ALIAS("devname:kvm");