// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};
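
/*
 * Layout of the 128-bit extended TOD clock as filled in by
 * get_tod_clock_ext() (STORE CLOCK EXTENDED): one epoch-index byte,
 * the 64-bit TOD value and seven reserved bytes.
 */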
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
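
/*
 * Apply a host TOD clock jump of "delta" to one SIE control block: the
 * guest-visible clock is host TOD + epoch, so the epoch is adjusted by
 * -delta, with a carry into the epoch index (epdx) when the addition
 * wraps and the multiple-epoch facility (ECD_MEF) is in use.
 */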
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
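
/*
 * Query availability of one PERFORM LOCKED OPERATION function: function
 * code "nr" with bit 0x100 set selects the test-bit variant, which sets
 * condition code 0 if the function is installed.
 */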
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
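
/*
 * Execute the query function (GR0 == 0) of the 32-bit instruction given
 * by "opcode" and store the resulting parameter block at "query".
 */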
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
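
/*
 * Probe what the host provides: the installed PLO functions, the
 * subfunctions of the available crypto/compression/sorting facilities
 * via their query calls, and the SIE features reported by SCLP that can
 * safely be offered to nested (vSIE) guests.
 */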
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
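
/*
 * Module initialization: set up the "kvm-trace" and "kvm-uv" s390 debug
 * feature areas, probe CPU features, and register the FLIC device ops
 * and the GIB. On any failure, kvm_arch_exit() below unwinds whatever
 * was already set up.
 */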
int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}
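
/*
 * Walk a memslot in segment-sized (_PAGE_ENTRIES pages) chunks and
 * transfer the dirty bits collected in the gmap into the memslot's
 * dirty bitmap, bailing out early if a fatal signal is pending.
 */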
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
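
/*
 * Handle the KVM_S390_VM_MEM_CTRL attributes: enable CMMA (only before
 * any VCPU is created and not together with 1m huge pages), reset CMMA
 * state, or change the guest memory limit by replacing the gmap of a
 * still VCPU-less VM.
 */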
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
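
/*
 * Reconfigure the crypto control blocks of all VCPUs: block each VCPU,
 * rebuild its crypto setup and force it out of the VSIE handler so that
 * any shadow CRYCB is recreated with the new settings.
 */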
Tony Krowiak20c922f2018-04-22 11:37:03 -0400906void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
Tony Krowiaka374e892014-09-03 10:13:53 +0200907{
908 struct kvm_vcpu *vcpu;
909 int i;
910
Tony Krowiak20c922f2018-04-22 11:37:03 -0400911 kvm_s390_vcpu_block_all(kvm);
912
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400913 kvm_for_each_vcpu(i, vcpu, kvm) {
Tony Krowiak20c922f2018-04-22 11:37:03 -0400914 kvm_s390_vcpu_crypto_setup(vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400915 /* recreate the shadow crycb by leaving the VSIE handler */
916 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
917 }
Tony Krowiak20c922f2018-04-22 11:37:03 -0400918
919 kvm_s390_vcpu_unblock_all(kvm);
920}
921
922static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
923{
Tony Krowiaka374e892014-09-03 10:13:53 +0200924 mutex_lock(&kvm->lock);
925 switch (attr->attr) {
926 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200927 if (!test_kvm_facility(kvm, 76)) {
928 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400929 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200930 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200931 get_random_bytes(
932 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
933 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
934 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200935 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200936 break;
937 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200938 if (!test_kvm_facility(kvm, 76)) {
939 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400940 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200941 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200942 get_random_bytes(
943 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
944 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
945 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200946 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200947 break;
948 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200949 if (!test_kvm_facility(kvm, 76)) {
950 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400951 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200952 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200953 kvm->arch.crypto.aes_kw = 0;
954 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
955 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200956 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200957 break;
958 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200959 if (!test_kvm_facility(kvm, 76)) {
960 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400961 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200962 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200963 kvm->arch.crypto.dea_kw = 0;
964 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
965 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200966 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200967 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -0400968 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
969 if (!ap_instructions_available()) {
970 mutex_unlock(&kvm->lock);
971 return -EOPNOTSUPP;
972 }
973 kvm->arch.crypto.apie = 1;
974 break;
975 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
976 if (!ap_instructions_available()) {
977 mutex_unlock(&kvm->lock);
978 return -EOPNOTSUPP;
979 }
980 kvm->arch.crypto.apie = 0;
981 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200982 default:
983 mutex_unlock(&kvm->lock);
984 return -ENXIO;
985 }
986
Tony Krowiak20c922f2018-04-22 11:37:03 -0400987 kvm_s390_vcpu_crypto_reset_all(kvm);
Tony Krowiaka374e892014-09-03 10:13:53 +0200988 mutex_unlock(&kvm->lock);
989 return 0;
990}
991
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200992static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
993{
994 int cx;
995 struct kvm_vcpu *vcpu;
996
997 kvm_for_each_vcpu(cx, vcpu, kvm)
998 kvm_s390_sync_request(req, vcpu);
999}
1000
1001/*
1002 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001003 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001004 */
1005static int kvm_s390_vm_start_migration(struct kvm *kvm)
1006{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001007 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001008 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001009 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001010 int slotnr;
1011
1012 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001013 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001014 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001015 slots = kvm_memslots(kvm);
1016 if (!slots || !slots->used_slots)
1017 return -EINVAL;
1018
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001019 if (!kvm->arch.use_cmma) {
1020 kvm->arch.migration_mode = 1;
1021 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001022 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001023 /* mark all the pages in active slots as dirty */
1024 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1025 ms = slots->memslots + slotnr;
Igor Mammedov13a17cc2019-09-11 03:52:18 -04001026 if (!ms->dirty_bitmap)
1027 return -EINVAL;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001028 /*
1029 * The second half of the bitmap is only used on x86,
1030 * and would be wasted otherwise, so we put it to good
1031 * use here to keep track of the state of the storage
1032 * attributes.
1033 */
1034 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1035 ram_pages += ms->npages;
1036 }
1037 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1038 kvm->arch.migration_mode = 1;
1039 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001040 return 0;
1041}
1042
1043/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001044 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001045 * kvm_s390_vm_start_migration.
1046 */
1047static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1048{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001049 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001050 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001051 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001052 kvm->arch.migration_mode = 0;
1053 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001054 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001055 return 0;
1056}
1057
1058static int kvm_s390_vm_set_migration(struct kvm *kvm,
1059 struct kvm_device_attr *attr)
1060{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001061 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001062
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001063 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001064 switch (attr->attr) {
1065 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001066 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001067 break;
1068 case KVM_S390_VM_MIGRATION_STOP:
1069 res = kvm_s390_vm_stop_migration(kvm);
1070 break;
1071 default:
1072 break;
1073 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001074 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001075
1076 return res;
1077}
1078
1079static int kvm_s390_vm_get_migration(struct kvm *kvm,
1080 struct kvm_device_attr *attr)
1081{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001082 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001083
1084 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1085 return -ENXIO;
1086
1087 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1088 return -EFAULT;
1089 return 0;
1090}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
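
/*
 * Illustrative sketch, not part of the original file: setting the full
 * epoch-extended guest TOD from userspace goes through the same attribute
 * interface, dispatched to kvm_s390_set_tod_ext() above ("vm_fd" and "tod"
 * are assumptions):
 *
 *	struct kvm_s390_vm_tod_clock gtod = { .epoch_idx = 0, .tod = tod };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr = KVM_S390_VM_TOD_EXT,
 *		.addr = (u64)&gtod,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */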
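
/*
 * Note on the epoch index handling below: with the multiple-epoch facility
 * (139), the guest TOD is wider than 64 bits. Adding the guest/host epoch
 * difference to the host TOD can wrap the low 64 bits; the wrap is detected
 * by comparing against the host TOD and carried into the epoch index.
 */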
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
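
/*
 * Illustrative sketch, not part of the original file: reading guest storage
 * keys from userspace ("vm_fd" and the buffer size are assumptions):
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (uint64_t)keys,
 *	};
 *	int ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest is not using
 * storage keys and nothing was copied.
 */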

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
1906
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001907/*
1908 * Base address and length must be sent at the start of each block, therefore
1909 * it's cheaper to send some clean data, as long as it's less than the size of
1910 * two longs.
1911 */
1912#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1913/* for consistency */
1914#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
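
/*
 * Illustrative sketch, not part of the original file: a migration loop in
 * userspace would drain the dirty CMMA values roughly as follows (buffer
 * management and error handling omitted; "vm_fd", "buf" and "bufsize" are
 * assumptions):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = bufsize,
 *		.flags = 0,
 *		.values = (uint64_t)buf,
 *	};
 *
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		// save log.count values starting at gfn log.start_gfn
 *		log.start_gfn += log.count;
 *		log.count = bufsize;
 *	} while (log.remaining);
 */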

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}
2283
Tony Krowiake585b242018-09-25 19:16:18 -04002284/*
2285 * The format of the crypto control block (CRYCB) is specified in the 3 low
2286 * order bits of the CRYCB designation (CRYCBD) field as follows:
2287 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2288 * AP extended addressing (APXA) facility are installed.
2289 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2290 * Format 2: Both the APXA and MSAX3 facilities are installed
2291 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002292static void kvm_s390_set_crycb_format(struct kvm *kvm)
2293{
2294 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2295
Tony Krowiake585b242018-09-25 19:16:18 -04002296 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2297 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2298
2299 /* Check whether MSAX3 is installed */
2300 if (!test_kvm_facility(kvm, 76))
2301 return;
2302
Tony Krowiak45c9b472015-01-13 11:33:26 -05002303 if (kvm_s390_apxa_installed())
2304 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2305 else
2306 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2307}
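
/*
 * kvm_arch_crypto_set_masks() and kvm_arch_crypto_clear_masks() below are
 * exported for the vfio_ap device driver, which uses them to install or
 * remove the AP adapter, usage-domain and control-domain masks in the guest
 * CRYCB when mediated AP devices are assigned to or unassigned from a guest.
 */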
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2348
Tony Krowiak421045982018-09-25 19:16:25 -04002349void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2350{
2351 mutex_lock(&kvm->lock);
2352 kvm_s390_vcpu_block_all(kvm);
2353
2354 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2355 sizeof(kvm->arch.crypto.crycb->apcb0));
2356 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2357 sizeof(kvm->arch.crypto.crycb->apcb1));
2358
Pierre Morel0e237e42018-10-05 10:31:09 +02002359 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002360 /* recreate the shadow crycb for each vcpu */
2361 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002362 kvm_s390_vcpu_unblock_all(kvm);
2363 mutex_unlock(&kvm->lock);
2364}
2365EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
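
/*
 * Note (not part of the original file): a VM starts out with a basic SCA
 * (bsca_block, up to 64 vcpus) and is converted on demand to the extended
 * SCA (esca_block, up to 248 vcpus); sca_dispose() above frees whichever
 * variant is currently in use.
 */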
2401
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in CZAM mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kvm_s390_gisa_destroy(kvm);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

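/*
 * Switch this VM from the basic SCA (up to KVM_S390_BSCA_CPU_SLOTS VCPUs,
 * one page) to the extended SCA (up to KVM_S390_ESCA_CPU_SLOTS VCPUs).
 * All VCPUs are blocked while their SCA origin and ECB2_ESCA bits are
 * rewritten, so none of them can run with a stale SCA pointer.
 */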
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

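/*
 * CPU timer accounting: while a VCPU is loaded and not idle, the guest
 * CPU timer in the SIE block keeps running. The helpers below snapshot
 * the host TOD clock when accounting starts and fold the elapsed delta
 * into the SIE block value when it stops; cputm_seqcount lets other
 * threads read a consistent value without taking a lock.
 */
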
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

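/*
 * Hypothetical reader (illustration only, not part of this file): a
 * query from another thread simply calls the accessor and relies on the
 * seqcount retry loop above for consistency:
 *
 *	__u64 cputm = kvm_s390_get_cpu_timer(vcpu);
 */
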
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);
}

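/*
 * Set up the SIE control block crypto fields of one VCPU from the
 * VM-wide settings: AP instruction interpretation (ECA_APIE) and
 * AES/DEA/ECC protected key wrapping (ECB3/ECD bits).
 */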
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

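/*
 * One-time SIE control block setup for a new VCPU: CPU model, execution
 * control bits (ECB/ECA/ECD) matching the available facilities, CMMA,
 * the clock comparator wakeup timer and crypto state. Called once from
 * kvm_arch_vcpu_create().
 */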
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
				      | SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		return -EINVAL;
	return 0;
}

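/*
 * Allocate and wire up the per-VCPU SIE page (SIE control block plus
 * interception data), select which register sets are synchronized via
 * the kvm_run area, and hand off to kvm_s390_vcpu_setup() for the SIE
 * control block contents.
 */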
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

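/*
 * gmap invalidation callback: if the invalidated host range overlaps a
 * VCPU's prefix area (two pages, always below 2GB guest absolute), that
 * VCPU must re-arm the ipte notifier via KVM_REQ_MMU_RELOAD before
 * re-entering SIE.
 */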
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

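/*
 * The two ONE_REG handlers below expose s390-specific registers (TOD
 * programmable register, epoch difference, CPU timer, clock comparator,
 * pfault state, PP and GBEA) to userspace. Hypothetical userspace usage
 * (illustration only, not part of this file):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */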
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

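/*
 * The three reset handlers below form a superset hierarchy, as the
 * in-line comments note: normal reset < initial reset < clear reset.
 * Each stronger reset first applies the weaker one and then clears
 * additional state.
 */
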
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
	vcpu->run->s.regs.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
}

static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

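/*
 * Process all pending VCPU requests before (re-)entering SIE: re-arm
 * the prefix-page ipte notifier, flush the TLB, toggle IBS, and switch
 * CMM interpretation on or off for migration. Each handled request
 * restarts the loop, since handling one request may raise another.
 */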
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003502static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3503{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003504retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003505 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003506 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003507 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003508 /*
3509 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003510 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003511 * This ensures that the ipte instruction for this request has
3512 * already finished. We might race against a second unmapper that
3513 * wants to set the blocking bit. Lets just retry the request loop.
3514 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

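/*
 * Set the guest TOD clock for the whole VM: the epoch (and, with the
 * multiple-epoch facility, the epoch index) is computed as the difference
 * between the requested guest TOD and the host TOD, then propagated to all
 * vcpus while they are blocked, so every CPU observes the same guest time.
 */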
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

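/*
 * Inject the pseudo-page-fault notification for @token: an external
 * interrupt of type PFAULT_INIT on this vcpu when the fault handling
 * starts, or a floating PFAULT_DONE interrupt for the VM once the page
 * has been made available.
 */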
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

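/*
 * Check whether the guest is currently allowed to receive a pseudo page
 * fault for the host fault in gmap_addr and, if so, queue the async page
 * fault work. Returns nonzero if the work was queued, zero if the fault
 * has to be resolved synchronously instead.
 */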
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

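/*
 * Prepare the vcpu for the next SIE entry: handle completed pfaults,
 * reschedule and process host machine checks if needed, deliver pending
 * guest interrupts, act on vcpu requests and set up guest debug state.
 */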
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

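/*
 * Handle a host fault that hit while SIE was executing the guest: inject
 * an addressing exception into the guest, forwarding the PSW past the
 * faulting instruction first (DAT exceptions are nullifying, so the PSW
 * still points at it).
 */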
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

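/*
 * Post-process a SIE exit: reinject machine checks that arrived while the
 * guest was running, dispatch intercepts to their handlers, and convert
 * host faults into async page faults, synchronous fault-ins or guest
 * exceptions. Returns -EREMOTE when userspace has to handle the exit.
 */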
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

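/*
 * The central run loop: alternate between vcpu_pre_run(), the actual SIE
 * entry (with interrupts disabled and guest time accounting active) and
 * vcpu_post_run() until an error, a pending signal or a guest debug exit
 * ends the loop.
 */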
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * must be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

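/*
 * Load the guest state that userspace passed in kvm_run into the SIE
 * control block and the CPU, honouring the kvm_dirty_regs bits, and save
 * the host state that store_regs() will restore after the run.
 */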
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}

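/*
 * Counterpart of sync_regs(): copy the guest state back into kvm_run for
 * userspace and restore the host state saved before the run.
 */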
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

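/*
 * IBS helpers: the IBS facility may only be active while a single vcpu is
 * running (see kvm_s390_vcpu_start/stop), so enabling and disabling it is
 * funneled through synchronous vcpu requests.
 */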
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

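/*
 * Move the vcpu out of the STOPPED state. If it is now the only started
 * vcpu, enable IBS to speed it up; if it is the second one to start,
 * disable IBS on all vcpus again.
 */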
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

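/*
 * Move the vcpu into the STOPPED state. If exactly one started vcpu
 * remains afterwards, enable IBS on that vcpu to speed it up.
 */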
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

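/*
 * Back end for the KVM_S390_MEM_OP vcpu ioctl: read or write guest memory
 * via the logical-address access functions, optionally only checking
 * accessibility and optionally injecting a resulting program exception
 * into the guest.
 */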
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		/* FALLTHROUGH */
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");