// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

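/*
 * Layout of the 16 bytes stored by get_tod_clock_ext(): the 8-bit epoch
 * index used by the multiple-epoch facility, followed by the 64-bit TOD
 * value.
 */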
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

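/*
 * Adjust the guest epoch (and, when the multiple-epoch facility is in use,
 * the epoch index) of a SIE control block after the host TOD clock has
 * jumped by delta.
 */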
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

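/*
 * Check whether subfunction nr of PERFORM LOCKED OPERATION is installed:
 * with bit 0x100 set in r0, PLO runs in "test bit" mode and indicates an
 * installed subfunction via condition code 0.
 */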
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

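/*
 * Probe the subfunctions and SIE features offered by the host: query the
 * PLO, PTFF and CPACF subfunction masks and, if nested virtualization is
 * enabled, the facilities that can be passed through to VSIE guests.
 */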
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

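/*
 * Harvest dirty bits from the gmap segment tables of a memslot and
 * propagate them into the KVM dirty bitmap, one guest segment
 * (_PAGE_ENTRIES pages) at a time.
 */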
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

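/*
 * Request interception of the operation exception on all VCPUs, e.g. after
 * KVM_CAP_S390_USER_INSTR0 has been enabled.
 */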
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

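/*
 * Enable a VM-wide capability. Capabilities that modify the guest CPU
 * model or memory setup can only be enabled as long as no VCPU has been
 * created yet.
 */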
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

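/*
 * Handle the KVM_S390_VM_MEM_CTRL attributes: enable CMMA, reset the CMMA
 * state of all guest pages, or change the guest memory limit (which
 * replaces the VM's gmap).
 */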
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

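/*
 * Re-initialize the crypto controls of every VCPU and kick it out of the
 * VSIE handler so that the shadow crycb is re-created. All VCPUs are
 * blocked while the update takes place.
 */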
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

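/* Raise the given synchronous request on every VCPU of the VM. */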
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

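/*
 * Set the guest TOD clock, including the epoch index when the guest has
 * the multiple-epoch facility (139).
 */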
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

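/*
 * Derive the guest TOD clock from the host clock plus the VM's epoch (and
 * epoch index, with carry, when facility 139 is available to the guest).
 * Preemption is disabled so that both values are read consistently.
 */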
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

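/*
 * Set the guest CPU model: cpuid, IBC (clamped to the range the host
 * supports) and the facility list. Only possible while no vCPUs exist.
 */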
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

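/*
 * Report the host machine model: host cpuid, host IBC and the facility
 * mask and list as known to the hypervisor.
 */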
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

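/*
 * Read the guest's storage keys for a range of guest frames into a buffer
 * and copy them to user space. Returns KVM_S390_GET_SKEYS_NONE if the
 * guest has never used storage keys.
 */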
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

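/*
 * Copy storage keys from user space and set them for a range of guest
 * frames, enabling storage key handling for the guest on first use. A
 * failing set_guest_storage_key() is retried after resolving the fault
 * via fixup_user_fault().
 */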
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

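/*
 * Peek mode: read the CMMA attributes of bufsize consecutive pages
 * starting at args->start_gfn, without touching the dirty bitmap.
 */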
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

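/*
 * Find the next guest frame, starting at cur_gfn, whose CMMA dirty bit is
 * set, walking the memslots and wrapping around past the highest slot.
 */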
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

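/*
 * Harvest CMMA values starting at the next dirty gfn: each dirty bit that
 * is read is cleared (and the global counter decremented), and the walk
 * stops when the buffer is full, memory ends, or the next dirty page is
 * more than KVM_S390_MAX_BIT_DISTANCE pages away.
 */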
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

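/* Dispatcher for the ioctls issued on the VM file descriptor */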
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

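/* Query whether the AP extended addressing (APXA) facility is installed */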
static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

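/*
 * The initial cpuid of a new VM is the host cpuid, with the version byte
 * replaced by 0xff.
 */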
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

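/* Free the SCA, basic or extended depending on what is in use */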
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kvm_s390_gisa_destroy(kvm);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

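/*
 * Hook the vcpu's SIE block into the SCA. Without SCA entries only the
 * SCA origin is set, which is still needed for the ipte control.
 */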
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

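/*
 * Replace the basic SCA by an extended SCA, which provides room for more
 * than KVM_S390_BSCA_CPU_SLOTS vcpus. All entries are copied and all
 * vcpus are re-pointed to the new block while they are blocked.
 */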
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

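/*
 * Initial vcpu setup: announce the register sets that are synced via the
 * kvm_run area, depending on the available facilities, and create the
 * per-vcpu gmap for ucontrol VMs.
 */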
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002487int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2488{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002489 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2490 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002491 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2492 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002493 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002494 KVM_SYNC_CRS |
2495 KVM_SYNC_ARCH0 |
2496 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002497 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002498 if (test_kvm_facility(vcpu->kvm, 64))
2499 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002500 if (test_kvm_facility(vcpu->kvm, 82))
2501 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002502 if (test_kvm_facility(vcpu->kvm, 133))
2503 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002504 if (test_kvm_facility(vcpu->kvm, 156))
2505 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002506 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2507 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2508 */
2509 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002510 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002511 else
2512 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002513
2514 if (kvm_is_ucontrol(vcpu->kvm))
2515 return __kvm_ucontrol_vcpu_init(vcpu);
2516
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002517 return 0;
2518}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
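
/*
 * Illustrative sketch (not part of the original file): the lockless read
 * pattern used by kvm_s390_get_cpu_timer() above, reduced to its core. A
 * generic seqcount-protected 64-bit value would be read like this:
 */
static inline u64 seqcount_read_sketch(const seqcount_t *sc, const u64 *val)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(sc);	/* waits out an odd (in-progress) count */
		v = *val;
	} while (read_seqcount_retry(sc, seq));	/* retry if a writer raced us */
	return v;
}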

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
					CR0_INTERRUPT_KEY_SUBMASK |
					CR0_MEASUREMENT_ALERT_SUBMASK;
	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
					CR14_UNUSED_33 |
					CR14_EXTERNAL_DAMAGE_SUBMASK;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
				      | SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
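
/*
 * Illustrative sketch (not part of the original file): how a caller
 * consumes the ERR_PTR protocol used by kvm_arch_vcpu_create() above.
 * The wrapper name is hypothetical:
 */
static inline struct kvm_vcpu *create_vcpu_or_log_sketch(struct kvm *kvm,
							 unsigned int id)
{
	struct kvm_vcpu *vcpu = kvm_arch_vcpu_create(kvm, id);

	if (IS_ERR(vcpu))	/* the error code is encoded in the pointer */
		pr_err("vcpu %u creation failed: %ld\n", id, PTR_ERR(vcpu));
	return vcpu;
}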

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
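
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for safely modifying SIE control block state from another thread is
 * block -> modify -> unblock, built from the helpers above:
 */
static inline void modify_sie_state_sketch(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_block(vcpu);	/* sets PROG_BLOCK_SIE and kicks the CPU */
	/* ... update vcpu->arch.sie_block fields here ... */
	kvm_s390_vcpu_unblock(vcpu);	/* permit SIE re-entry */
}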

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
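
/*
 * Sketch (not part of the original file): the prefix match above is the
 * generic closed-interval overlap test, applied to the two prefix pages
 * [prefix, prefix + 2*PAGE_SIZE - 1] versus the notified range [start, end]:
 */
static inline bool ranges_overlap_sketch(unsigned long a_start,
					 unsigned long a_end,
					 unsigned long b_start,
					 unsigned long b_end)
{
	return a_start <= b_end && b_start <= a_end;
}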

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
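
/*
 * Illustrative user-space sketch (not kernel code): the handlers above
 * are reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu
 * ioctls. Assumes an open vcpu file descriptor:
 */
#if 0	/* user-space example only */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_cpu_timer(int vcpu_fd, __u64 value)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)&value,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif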

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}
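
/*
 * Sketch (not part of the original file): with the vector facility, FPRs
 * 0-15 are aliased onto the leftmost 64 bits of VRs 0-15, which is what
 * convert_fp_to_vx()/convert_vx_to_fp() above rely on. Reduced to plain
 * arrays (types simplified, name hypothetical):
 */
static inline void vrs_to_fprs_sketch(u64 *fprs, const u64 (*vrs)[2])
{
	int i;

	for (i = 0; i < 16; i++)
		fprs[i] = vrs[i][0];	/* high doubleword of each VR */
}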

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
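
/*
 * Descriptive sketch (not part of the original file) of the borrow
 * handling above: with facility 139 the guest epoch is a multi-word
 * quantity (epdx:epoch). Computing it as guest_tod - host_tod in 64 bits
 * wraps whenever host_tod > guest_tod, in which case one must be
 * borrowed from the epoch index:
 *
 *	epoch = guest_tod - host_tod;	(mod 2^64)
 *	epdx  = guest_idx - host_idx;
 *	if (epoch > guest_tod)		(unsigned wrap occurred)
 *		epdx -= 1;
 */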

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly, but we still
	 * want check_async_completion to clean up.
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
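
/*
 * Sketch (not part of the original file): on s390, the instruction length
 * is encoded in the two leftmost bits of the first opcode byte, which is
 * what insn_length() above evaluates:
 */
static inline int insn_length_sketch(u8 opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;	/* 00 -> two-byte instruction */
	case 3:
		return 6;	/* 11 -> six-byte instruction */
	default:
		return 4;	/* 01/10 -> four-byte instruction */
	}
}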

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}
3699
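/*
 * Counterpart of sync_regs(): copy the guest register state back into
 * kvm_run after the run loop and restore the host floating point,
 * access register and guarded storage state.
 */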
3700static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3701{
3702 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3703 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3704 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3705 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003706 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003707 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3708 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3709 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3710 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3711 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3712 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3713 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003714 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003715 save_access_regs(vcpu->run->s.regs.acrs);
3716 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003717 /* Save guest register state */
3718 save_fpu_regs();
3719 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3720 /* Restore will be done lazily at return */
3721 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3722 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003723 if (MACHINE_HAS_GS) {
3724 __ctl_set_bit(2, 4);
3725 if (vcpu->arch.gs_enabled)
3726 save_gs_cb(current->thread.gs_cb);
3727 preempt_disable();
3728 current->thread.gs_cb = vcpu->arch.host_gscb;
3729 restore_gs_cb(vcpu->arch.host_gscb);
3730 preempt_enable();
3731 if (!vcpu->arch.host_gscb)
3732 __ctl_clear_bit(2, 4);
3733 vcpu->arch.host_gscb = NULL;
3734 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003735 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003736}
3737
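/*
 * The KVM_RUN ioctl: start the vcpu unless userspace controls the cpu
 * state itself, sync the register state in, run the SIE loop until
 * userspace intervention is needed, then sync the state back out.
 */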
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003738int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3739{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003740 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003741
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003742 if (kvm_run->immediate_exit)
3743 return -EINTR;
3744
Christoffer Dallaccb7572017-12-04 21:35:25 +01003745 vcpu_load(vcpu);
3746
David Hildenbrand27291e22014-01-23 12:26:52 +01003747 if (guestdbg_exit_pending(vcpu)) {
3748 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003749 rc = 0;
3750 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01003751 }
3752
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003753 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003754
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003755 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3756 kvm_s390_vcpu_start(vcpu);
3757 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003758 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003759 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01003760 rc = -EINVAL;
3761 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003762 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003763
David Hildenbrandb028ee32014-07-17 10:47:43 +02003764 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003765 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003766
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003767 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003768 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003769
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003770 if (signal_pending(current) && !rc) {
3771 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003772 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003773 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003774
David Hildenbrand27291e22014-01-23 12:26:52 +01003775 if (guestdbg_exit_pending(vcpu) && !rc) {
3776 kvm_s390_prepare_debug_exit(vcpu);
3777 rc = 0;
3778 }
3779
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003780 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003781 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003782 rc = 0;
3783 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003784
David Hildenbranddb0758b2016-02-15 09:42:25 +01003785 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003786 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003787
Jan H. Schönherr20b70352017-11-24 22:39:01 +01003788 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003789
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003790 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01003791out:
3792 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003793 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003794}
3795
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003796/*
3797 * store status at address
3798	 * we have two special cases:
3799 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3800 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3801 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003802int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003803{
Carsten Otte092670c2011-07-24 10:48:22 +02003804 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003805 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003806 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003807 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003808 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003809
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003810 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003811 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3812 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003813 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003814 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003815 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3816 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003817 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003818 gpa = px;
3819 } else
3820 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003821
3822 /* manually convert vector registers if necessary */
3823 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003824 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003825 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3826 fprs, 128);
3827 } else {
3828 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003829 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003830 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003831 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003832 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003833 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003834 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003835 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003836 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003837 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003838 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003839 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003840 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003841 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003842 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003843 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003844 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003845 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003846 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003847 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003848 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003849 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003850 &vcpu->arch.sie_block->gcr, 128);
3851 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003852}
3853
Thomas Huthe8798922013-11-06 15:46:33 +01003854int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3855{
3856 /*
3857 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003858 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003859	 * them into the save area
3860 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003861 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003862 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003863 save_access_regs(vcpu->run->s.regs.acrs);
3864
3865 return kvm_s390_store_status_unloaded(vcpu, addr);
3866}
3867
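/*
 * The IBS facility speeds up a vcpu while it is the only started one.
 * It is toggled via synced requests, so the change is applied by the
 * target vcpu itself on its next guest entry.
 */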
David Hildenbrand8ad35752014-03-14 11:00:21 +01003868static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3869{
3870 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003871 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003872}
3873
3874static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3875{
3876 unsigned int i;
3877 struct kvm_vcpu *vcpu;
3878
3879 kvm_for_each_vcpu(i, vcpu, kvm) {
3880 __disable_ibs_on_vcpu(vcpu);
3881 }
3882}
3883
3884static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3885{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003886 if (!sclp.has_ibs)
3887 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003888 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003889 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003890}
3891
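/*
 * Move a vcpu out of the STOPPED state; IBS is only left enabled while
 * at most one vcpu is started.
 */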
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003892void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3893{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003894 int i, online_vcpus, started_vcpus = 0;
3895
3896 if (!is_vcpu_stopped(vcpu))
3897 return;
3898
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003899 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003900 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003901 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003902 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3903
3904 for (i = 0; i < online_vcpus; i++) {
3905 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3906 started_vcpus++;
3907 }
3908
3909 if (started_vcpus == 0) {
3910 /* we're the only active VCPU -> speed it up */
3911 __enable_ibs_on_vcpu(vcpu);
3912 } else if (started_vcpus == 1) {
3913 /*
3914 * As we are starting a second VCPU, we have to disable
3915 * the IBS facility on all VCPUs to remove potentially
3916	 * outstanding ENABLE requests.
3917 */
3918 __disable_ibs_on_all_vcpus(vcpu->kvm);
3919 }
3920
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003921 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003922 /*
3923 * Another VCPU might have used IBS while we were offline.
3924 * Let's play safe and flush the VCPU at startup.
3925 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003926 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003927 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003928 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003929}
3930
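/*
 * Move a vcpu into the STOPPED state, discarding any pending SIGP STOP
 * interrupt. If exactly one started vcpu remains, IBS is re-enabled
 * for it.
 */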
3931void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3932{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003933 int i, online_vcpus, started_vcpus = 0;
3934 struct kvm_vcpu *started_vcpu = NULL;
3935
3936 if (is_vcpu_stopped(vcpu))
3937 return;
3938
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003939 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003940 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003941 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003942 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3943
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003944 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003945 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003946
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003947 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003948 __disable_ibs_on_vcpu(vcpu);
3949
3950 for (i = 0; i < online_vcpus; i++) {
3951 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3952 started_vcpus++;
3953 started_vcpu = vcpu->kvm->vcpus[i];
3954 }
3955 }
3956
3957 if (started_vcpus == 1) {
3958 /*
3959 * As we only have one VCPU left, we want to enable the
3960 * IBS facility for that VCPU to speed it up.
3961 */
3962 __enable_ibs_on_vcpu(started_vcpu);
3963 }
3964
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003965 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003966 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003967}
3968
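/* Per-vcpu KVM_ENABLE_CAP: only KVM_CAP_S390_CSS_SUPPORT is handled here. */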
Cornelia Huckd6712df2012-12-20 15:32:11 +01003969static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3970 struct kvm_enable_cap *cap)
3971{
3972 int r;
3973
3974 if (cap->flags)
3975 return -EINVAL;
3976
3977 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003978 case KVM_CAP_S390_CSS_SUPPORT:
3979 if (!vcpu->kvm->arch.css_support) {
3980 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003981 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003982 trace_kvm_s390_enable_css(vcpu->kvm);
3983 }
3984 r = 0;
3985 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003986 default:
3987 r = -EINVAL;
3988 break;
3989 }
3990 return r;
3991}
3992
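/*
 * Handle the KVM_S390_MEM_OP vcpu ioctl: read or write guest logical
 * memory through a temporary kernel buffer. KVM_S390_MEMOP_F_CHECK_ONLY
 * only validates the access, while KVM_S390_MEMOP_F_INJECT_EXCEPTION
 * makes a failing access inject the program interrupt into the guest.
 *
 * A minimal userspace sketch (assuming the usual UAPI field layout):
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.size  = length,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop))
 *		handle_error();
 */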
Thomas Huth41408c282015-02-06 15:01:21 +01003993static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3994 struct kvm_s390_mem_op *mop)
3995{
3996 void __user *uaddr = (void __user *)mop->buf;
3997 void *tmpbuf = NULL;
3998 int r, srcu_idx;
3999 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4000 | KVM_S390_MEMOP_F_CHECK_ONLY;
4001
4002 if (mop->flags & ~supported_flags)
4003 return -EINVAL;
4004
4005 if (mop->size > MEM_OP_MAX_SIZE)
4006 return -E2BIG;
4007
4008 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4009 tmpbuf = vmalloc(mop->size);
4010 if (!tmpbuf)
4011 return -ENOMEM;
4012 }
4013
4014 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4015
4016 switch (mop->op) {
4017 case KVM_S390_MEMOP_LOGICAL_READ:
4018 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004019 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4020 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004021 break;
4022 }
4023 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4024 if (r == 0) {
4025 if (copy_to_user(uaddr, tmpbuf, mop->size))
4026 r = -EFAULT;
4027 }
4028 break;
4029 case KVM_S390_MEMOP_LOGICAL_WRITE:
4030 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004031 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4032 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004033 break;
4034 }
4035 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4036 r = -EFAULT;
4037 break;
4038 }
4039 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4040 break;
4041 default:
4042 r = -EINVAL;
4043 }
4044
4045 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4046
4047 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4048 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4049
4050 vfree(tmpbuf);
4051 return r;
4052}
4053
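/*
 * ioctls that may be handled without taking the vcpu mutex; everything
 * else returns -ENOIOCTLCMD and falls through to kvm_arch_vcpu_ioctl()
 * below.
 */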
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004054long kvm_arch_vcpu_async_ioctl(struct file *filp,
4055 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004056{
4057 struct kvm_vcpu *vcpu = filp->private_data;
4058 void __user *argp = (void __user *)arg;
4059
Avi Kivity93736622010-05-13 12:35:17 +03004060 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004061 case KVM_S390_IRQ: {
4062 struct kvm_s390_irq s390irq;
4063
Jens Freimann47b43c52014-11-11 20:57:06 +01004064 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004065 return -EFAULT;
4066 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004067 }
Avi Kivity93736622010-05-13 12:35:17 +03004068 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004069 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02004070 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01004071
4072 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004073 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004074 if (s390int_to_s390irq(&s390int, &s390irq))
4075 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004076 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004077 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004078 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004079 return -ENOIOCTLCMD;
4080}
4081
4082long kvm_arch_vcpu_ioctl(struct file *filp,
4083 unsigned int ioctl, unsigned long arg)
4084{
4085 struct kvm_vcpu *vcpu = filp->private_data;
4086 void __user *argp = (void __user *)arg;
4087 int idx;
4088 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004089
4090 vcpu_load(vcpu);
4091
4092 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004093 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004094 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004095 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004096 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004097 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004098 case KVM_S390_SET_INITIAL_PSW: {
4099 psw_t psw;
4100
Avi Kivitybc923cc2010-05-13 12:21:46 +03004101 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004102 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004103 break;
4104 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4105 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004106 }
4107 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03004108 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4109 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004110 case KVM_SET_ONE_REG:
4111 case KVM_GET_ONE_REG: {
4112 struct kvm_one_reg reg;
4113 r = -EFAULT;
4114 if (copy_from_user(&reg, argp, sizeof(reg)))
4115 break;
4116 if (ioctl == KVM_SET_ONE_REG)
4117 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4118 else
4119 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4120 break;
4121 }
Carsten Otte27e03932012-01-04 10:25:21 +01004122#ifdef CONFIG_KVM_S390_UCONTROL
4123 case KVM_S390_UCAS_MAP: {
4124 struct kvm_s390_ucas_mapping ucasmap;
4125
4126 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4127 r = -EFAULT;
4128 break;
4129 }
4130
4131 if (!kvm_is_ucontrol(vcpu->kvm)) {
4132 r = -EINVAL;
4133 break;
4134 }
4135
4136 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4137 ucasmap.vcpu_addr, ucasmap.length);
4138 break;
4139 }
4140 case KVM_S390_UCAS_UNMAP: {
4141 struct kvm_s390_ucas_mapping ucasmap;
4142
4143 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4144 r = -EFAULT;
4145 break;
4146 }
4147
4148 if (!kvm_is_ucontrol(vcpu->kvm)) {
4149 r = -EINVAL;
4150 break;
4151 }
4152
4153 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4154 ucasmap.length);
4155 break;
4156 }
4157#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004158 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004159 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004160 break;
4161 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004162 case KVM_ENABLE_CAP:
4163 {
4164 struct kvm_enable_cap cap;
4165 r = -EFAULT;
4166 if (copy_from_user(&cap, argp, sizeof(cap)))
4167 break;
4168 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4169 break;
4170 }
Thomas Huth41408c282015-02-06 15:01:21 +01004171 case KVM_S390_MEM_OP: {
4172 struct kvm_s390_mem_op mem_op;
4173
4174 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4175 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4176 else
4177 r = -EFAULT;
4178 break;
4179 }
Jens Freimann816c7662014-11-24 17:13:46 +01004180 case KVM_S390_SET_IRQ_STATE: {
4181 struct kvm_s390_irq_state irq_state;
4182
4183 r = -EFAULT;
4184 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4185 break;
4186 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4187 irq_state.len == 0 ||
4188 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4189 r = -EINVAL;
4190 break;
4191 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004192 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004193 r = kvm_s390_set_irq_state(vcpu,
4194 (void __user *) irq_state.buf,
4195 irq_state.len);
4196 break;
4197 }
4198 case KVM_S390_GET_IRQ_STATE: {
4199 struct kvm_s390_irq_state irq_state;
4200
4201 r = -EFAULT;
4202 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4203 break;
4204 if (irq_state.len == 0) {
4205 r = -EINVAL;
4206 break;
4207 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004208 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004209 r = kvm_s390_get_irq_state(vcpu,
4210 (__u8 __user *) irq_state.buf,
4211 irq_state.len);
4212 break;
4213 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004214 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004215 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004216 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004217
4218 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004219 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004220}
4221
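/*
 * mmap fault handler for the vcpu file: only user-controlled (ucontrol)
 * VMs expose the SIE control block page to userspace.
 */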
Souptick Joarder1499fa82018-04-19 00:49:58 +05304222vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004223{
4224#ifdef CONFIG_KVM_S390_UCONTROL
4225 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4226 && (kvm_is_ucontrol(vcpu->kvm))) {
4227 vmf->page = virt_to_page(vcpu->arch.sie_block);
4228 get_page(vmf->page);
4229 return 0;
4230 }
4231#endif
4232 return VM_FAULT_SIGBUS;
4233}
4234
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304235int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4236 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004237{
4238 return 0;
4239}
4240
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004241/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004242int kvm_arch_prepare_memory_region(struct kvm *kvm,
4243 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004244 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004245 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004246{
Nick Wangdd2887e2013-03-25 17:22:57 +01004247	/* A few sanity checks. Memory slots have to start and end on a
	4248	   segment boundary (1MB). The memory backing them in userland may
	4249	   be fragmented into several vmas, and it is fine to mmap() and
	4250	   munmap() within this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004251
Carsten Otte598841c2011-07-24 10:48:21 +02004252 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004253 return -EINVAL;
4254
Carsten Otte598841c2011-07-24 10:48:21 +02004255 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004256 return -EINVAL;
4257
Dominik Dingela3a92c32014-12-01 17:24:42 +01004258 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4259 return -EINVAL;
4260
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004261 return 0;
4262}
4263
4264void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004265 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004266 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004267 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004268 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004269{
Carsten Ottef7850c92011-07-24 10:48:23 +02004270 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004271
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01004272 /* If the basics of the memslot do not change, we do not want
4273 * to update the gmap. Every update causes several unnecessary
4274 * segment translation exceptions. This is usually handled just
4275 * fine by the normal fault handler + gmap, but it will also
4276 * cause faults on the prefix page of running guest CPUs.
4277 */
4278 if (old->userspace_addr == mem->userspace_addr &&
4279 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
4280 old->npages * PAGE_SIZE == mem->memory_size)
4281 return;
Carsten Otte598841c2011-07-24 10:48:21 +02004282
4283 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4284 mem->guest_phys_addr, mem->memory_size);
4285 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004286 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004287 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004288}
4289
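/*
 * Mask of the facility bits in facility word i that may be forwarded
 * to guests: the two hmfai bits belonging to word i select how many
 * bits, in steps of 16, have to be suppressed.
 */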
Alexander Yarygin60a37702016-04-01 15:38:57 +03004290static inline unsigned long nonhyp_mask(int i)
4291{
4292 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4293
4294 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4295}
4296
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004297void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4298{
4299 vcpu->valid_wakeup = false;
4300}
4301
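/*
 * Module init: refuse to load without the SIEF2 facility, reject the
 * nested + hugepage combination and restrict the facility list offered
 * to guests before registering with the common KVM code.
 */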
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004302static int __init kvm_s390_init(void)
4303{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004304 int i;
4305
David Hildenbrand07197fd2015-01-30 16:01:38 +01004306 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004307 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004308 return -ENODEV;
4309 }
4310
Janosch Franka4499382018-07-13 11:28:31 +01004311 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004312 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004313 return -EINVAL;
4314 }
4315
Alexander Yarygin60a37702016-04-01 15:38:57 +03004316 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004317 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004318 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4319
Michael Mueller9d8d5782015-02-02 15:42:51 +01004320 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004321}
4322
4323static void __exit kvm_s390_exit(void)
4324{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004325 kvm_exit();
4326}
4327
4328module_init(kvm_s390_init);
4329module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004330
4331/*
4332 * Enable autoloading of the kvm module.
4333 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4334 * since x86 takes a different approach.
4335 */
4336#include <linux/miscdevice.h>
4337MODULE_ALIAS_MISCDEV(KVM_MINOR);
4338MODULE_ALIAS("devname:kvm");