// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

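/*
 * Layout of the 128-bit extended TOD clock value as stored by
 * STORE CLOCK EXTENDED: 8-bit epoch index, 64-bit TOD, reserved tail.
 */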
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

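/*
 * Adjust the guest epoch (and, with the multiple-epoch facility, the
 * epoch index) in a SIE control block after the host TOD clock jumped
 * by @delta.
 */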
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

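/*
 * Use the "test bit" variant of PERFORM LOCKED OPERATION (bit 0x100 set
 * in the function code) to check whether PLO function @nr is available;
 * condition code 0 means it is.
 */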
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

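/*
 * Execute the query function (function code 0) of the instruction with
 * the given opcode and store the resulting 32-byte availability mask
 * in @query.
 */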
static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}

#define INSN_SORTL 0xb938

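/* Probe which subfunctions and SIE features the host can offer to guests. */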
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

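/*
 * Transfer the dirty bits from the gmap segment tables into the
 * memslot's dirty bitmap, one segment (_PAGE_ENTRIES pages) at a time.
 */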
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

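/* Request an intercept of operation exceptions on every vcpu. */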
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

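/*
 * Reload the crypto configuration of every vcpu. All vcpus are blocked
 * first so that none of them runs in the SIE while the crycb is rebuilt.
 */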
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

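/* Make the given synchronous request on every vcpu of the VM. */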
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

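/*
 * Compute the guest's view of the (extended) TOD clock from the host
 * clock and the VM's epoch (index), with preemption disabled so the
 * values stay consistent.
 */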
David Hildenbrand33d1b272018-04-27 14:36:13 +02001141static void kvm_s390_get_tod_clock(struct kvm *kvm,
1142 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001143{
1144 struct kvm_s390_tod_clock_ext htod;
1145
1146 preempt_disable();
1147
1148 get_tod_clock_ext((char *)&htod);
1149
1150 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001151 gtod->epoch_idx = 0;
1152 if (test_kvm_facility(kvm, 139)) {
1153 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1154 if (gtod->tod < htod.tod)
1155 gtod->epoch_idx += 1;
1156 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001157
1158 preempt_enable();
1159}
1160
1161static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1162{
1163 struct kvm_s390_vm_tod_clock gtod;
1164
1165 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001166 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001167 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1168 return -EFAULT;
1169
1170 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1171 gtod.epoch_idx, gtod.tod);
1172 return 0;
1173}
1174
Jason J. Herne72f25022014-11-25 09:46:02 -05001175static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1176{
1177 u8 gtod_high = 0;
1178
1179 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1180 sizeof(gtod_high)))
1181 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001182 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001183
1184 return 0;
1185}
1186
1187static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1188{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001189 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001190
David Hildenbrand60417fc2015-09-29 16:20:36 +02001191 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001192 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1193 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001194 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001195
1196 return 0;
1197}
1198
1199static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1200{
1201 int ret;
1202
1203 if (attr->flags)
1204 return -EINVAL;
1205
1206 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001207 case KVM_S390_VM_TOD_EXT:
1208 ret = kvm_s390_get_tod_ext(kvm, attr);
1209 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001210 case KVM_S390_VM_TOD_HIGH:
1211 ret = kvm_s390_get_tod_high(kvm, attr);
1212 break;
1213 case KVM_S390_VM_TOD_LOW:
1214 ret = kvm_s390_get_tod_low(kvm, attr);
1215 break;
1216 default:
1217 ret = -ENXIO;
1218 break;
1219 }
1220 return ret;
1221}
1222
Michael Mueller658b6ed2015-02-02 15:49:35 +01001223static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1224{
1225 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001226 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001227 int ret = 0;
1228
1229 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001230 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001231 ret = -EBUSY;
1232 goto out;
1233 }
1234 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1235 if (!proc) {
1236 ret = -ENOMEM;
1237 goto out;
1238 }
1239 if (!copy_from_user(proc, (void __user *)attr->addr,
1240 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001241 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001242 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1243 unblocked_ibc = sclp.ibc & 0xfff;
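		/* clamp the requested IBC value into the range the machine supports */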
David Hildenbrand0487c442016-06-10 09:22:31 +02001244 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001245 if (proc->ibc > unblocked_ibc)
1246 kvm->arch.model.ibc = unblocked_ibc;
1247 else if (proc->ibc < lowest_ibc)
1248 kvm->arch.model.ibc = lowest_ibc;
1249 else
1250 kvm->arch.model.ibc = proc->ibc;
1251 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001252 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001253 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001254 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1255 kvm->arch.model.ibc,
1256 kvm->arch.model.cpuid);
1257 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1258 kvm->arch.model.fac_list[0],
1259 kvm->arch.model.fac_list[1],
1260 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001261 } else
1262 ret = -EFAULT;
1263 kfree(proc);
1264out:
1265 mutex_unlock(&kvm->lock);
1266 return ret;
1267}
1268
David Hildenbrand15c97052015-03-19 17:36:43 +01001269static int kvm_s390_set_processor_feat(struct kvm *kvm,
1270 struct kvm_device_attr *attr)
1271{
1272 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001273
1274 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1275 return -EFAULT;
1276 if (!bitmap_subset((unsigned long *) data.feat,
1277 kvm_s390_available_cpu_feat,
1278 KVM_S390_VM_CPU_FEAT_NR_BITS))
1279 return -EINVAL;
1280
1281 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001282 if (kvm->created_vcpus) {
1283 mutex_unlock(&kvm->lock);
1284 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001285 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001286 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1287 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001288 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001289 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1290 data.feat[0],
1291 data.feat[1],
1292 data.feat[2]);
1293 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001294}
1295
David Hildenbrand0a763c72016-05-18 16:03:47 +02001296static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1297 struct kvm_device_attr *attr)
1298{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001299 mutex_lock(&kvm->lock);
1300 if (kvm->created_vcpus) {
1301 mutex_unlock(&kvm->lock);
1302 return -EBUSY;
1303 }
1304
1305 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1306 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1307 mutex_unlock(&kvm->lock);
1308 return -EFAULT;
1309 }
1310 mutex_unlock(&kvm->lock);
1311
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001312 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1313 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1314 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1315 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1316 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1317 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1318 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1319 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1320 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1321 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1322 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1323 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1324 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1325 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1326 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1327 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1328 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1329 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1330 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1331 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1332 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1333 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1334 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1335 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1336 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1337 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1338 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1339 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1340 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1341 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1342 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1343 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1344 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1345 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1346 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1347 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1348 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1349 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1350 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1351 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1352 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1353 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1354 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1355 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001356 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1357 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1358 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001359 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1360 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1361 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1363 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001364
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001365 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001366}
1367
Michael Mueller658b6ed2015-02-02 15:49:35 +01001368static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1369{
1370 int ret = -ENXIO;
1371
1372 switch (attr->attr) {
1373 case KVM_S390_VM_CPU_PROCESSOR:
1374 ret = kvm_s390_set_processor(kvm, attr);
1375 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001376 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1377 ret = kvm_s390_set_processor_feat(kvm, attr);
1378 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001379 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1380 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1381 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001382 }
1383 return ret;
1384}
1385
1386static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1387{
1388 struct kvm_s390_vm_cpu_processor *proc;
1389 int ret = 0;
1390
1391 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1392 if (!proc) {
1393 ret = -ENOMEM;
1394 goto out;
1395 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001396 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001397 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001398 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1399 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001400 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1401 kvm->arch.model.ibc,
1402 kvm->arch.model.cpuid);
1403 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1404 kvm->arch.model.fac_list[0],
1405 kvm->arch.model.fac_list[1],
1406 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001407 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1408 ret = -EFAULT;
1409 kfree(proc);
1410out:
1411 return ret;
1412}
1413
1414static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1415{
1416 struct kvm_s390_vm_cpu_machine *mach;
1417 int ret = 0;
1418
1419 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1420 if (!mach) {
1421 ret = -ENOMEM;
1422 goto out;
1423 }
1424 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001425 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001426 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001427 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001428 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001429 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001430 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1431 kvm->arch.model.ibc,
1432 kvm->arch.model.cpuid);
1433 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1434 mach->fac_mask[0],
1435 mach->fac_mask[1],
1436 mach->fac_mask[2]);
1437 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1438 mach->fac_list[0],
1439 mach->fac_list[1],
1440 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001441 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1442 ret = -EFAULT;
1443 kfree(mach);
1444out:
1445 return ret;
1446}
1447
David Hildenbrand15c97052015-03-19 17:36:43 +01001448static int kvm_s390_get_processor_feat(struct kvm *kvm,
1449 struct kvm_device_attr *attr)
1450{
1451 struct kvm_s390_vm_cpu_feat data;
1452
1453 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1454 KVM_S390_VM_CPU_FEAT_NR_BITS);
1455 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1456 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001457 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1458 data.feat[0],
1459 data.feat[1],
1460 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001461 return 0;
1462}
1463
1464static int kvm_s390_get_machine_feat(struct kvm *kvm,
1465 struct kvm_device_attr *attr)
1466{
1467 struct kvm_s390_vm_cpu_feat data;
1468
1469 bitmap_copy((unsigned long *) data.feat,
1470 kvm_s390_available_cpu_feat,
1471 KVM_S390_VM_CPU_FEAT_NR_BITS);
1472 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1473 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001474 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1475 data.feat[0],
1476 data.feat[1],
1477 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001478 return 0;
1479}
1480
David Hildenbrand0a763c72016-05-18 16:03:47 +02001481static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1482 struct kvm_device_attr *attr)
1483{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001484 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1485 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1486 return -EFAULT;
1487
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001488 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1489 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1490 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1491 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1492 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1493 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1494 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1495 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1496 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1497 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1498 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1499 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1500 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1501 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1502 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1503 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1504 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1505 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1506 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1507 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1508 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1509 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1510 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1511 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1512 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1513 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1514 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1516 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1517 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1518 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1519 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1520 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1521 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1522 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1523 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1524 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1525 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1526 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1527 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1528 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1529 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1530 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001532 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1533 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001535 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1536 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1537 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1538 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1539 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001540
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001541 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001542}
1543
1544static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1545 struct kvm_device_attr *attr)
1546{
1547 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1548 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1549 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001550
1551 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1552 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1553 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1554 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1555 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1556 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1557 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1558 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1559 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1560 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1561 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1562 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1563 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1564 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1565 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1566 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1567 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1568 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1569 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1570 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1571 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1572 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1573 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1574 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1575 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1576 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1577 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1578 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1579 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1580 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1581 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1582 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1583 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1584 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1585 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1586 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1587 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1588 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1589 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1590 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1591 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1592 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1593 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1594 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001595 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1596 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1597 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001598 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1599 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1600 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1601 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1602 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001603
David Hildenbrand0a763c72016-05-18 16:03:47 +02001604 return 0;
1605}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001606
Michael Mueller658b6ed2015-02-02 15:49:35 +01001607static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1608{
1609 int ret = -ENXIO;
1610
1611 switch (attr->attr) {
1612 case KVM_S390_VM_CPU_PROCESSOR:
1613 ret = kvm_s390_get_processor(kvm, attr);
1614 break;
1615 case KVM_S390_VM_CPU_MACHINE:
1616 ret = kvm_s390_get_machine(kvm, attr);
1617 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001618 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1619 ret = kvm_s390_get_processor_feat(kvm, attr);
1620 break;
1621 case KVM_S390_VM_CPU_MACHINE_FEAT:
1622 ret = kvm_s390_get_machine_feat(kvm, attr);
1623 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001624 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1625 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1626 break;
1627 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1628 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1629 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001630 }
1631 return ret;
1632}
1633
Dominik Dingelf2061652014-04-09 13:13:00 +02001634static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1635{
1636 int ret;
1637
1638 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001639 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001640 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001641 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001642 case KVM_S390_VM_TOD:
1643 ret = kvm_s390_set_tod(kvm, attr);
1644 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001645 case KVM_S390_VM_CPU_MODEL:
1646 ret = kvm_s390_set_cpu_model(kvm, attr);
1647 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001648 case KVM_S390_VM_CRYPTO:
1649 ret = kvm_s390_vm_set_crypto(kvm, attr);
1650 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001651 case KVM_S390_VM_MIGRATION:
1652 ret = kvm_s390_vm_set_migration(kvm, attr);
1653 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001654 default:
1655 ret = -ENXIO;
1656 break;
1657 }
1658
1659 return ret;
1660}
1661
1662static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1663{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001664 int ret;
1665
1666 switch (attr->group) {
1667 case KVM_S390_VM_MEM_CTRL:
1668 ret = kvm_s390_get_mem_control(kvm, attr);
1669 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001670 case KVM_S390_VM_TOD:
1671 ret = kvm_s390_get_tod(kvm, attr);
1672 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001673 case KVM_S390_VM_CPU_MODEL:
1674 ret = kvm_s390_get_cpu_model(kvm, attr);
1675 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001676 case KVM_S390_VM_MIGRATION:
1677 ret = kvm_s390_vm_get_migration(kvm, attr);
1678 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001679 default:
1680 ret = -ENXIO;
1681 break;
1682 }
1683
1684 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001685}
1686
1687static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1688{
1689 int ret;
1690
1691 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001692 case KVM_S390_VM_MEM_CTRL:
1693 switch (attr->attr) {
1694 case KVM_S390_VM_MEM_ENABLE_CMMA:
1695 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001696 ret = sclp.has_cmma ? 0 : -ENXIO;
1697 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001698 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001699 ret = 0;
1700 break;
1701 default:
1702 ret = -ENXIO;
1703 break;
1704 }
1705 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001706 case KVM_S390_VM_TOD:
1707 switch (attr->attr) {
1708 case KVM_S390_VM_TOD_LOW:
1709 case KVM_S390_VM_TOD_HIGH:
1710 ret = 0;
1711 break;
1712 default:
1713 ret = -ENXIO;
1714 break;
1715 }
1716 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001717 case KVM_S390_VM_CPU_MODEL:
1718 switch (attr->attr) {
1719 case KVM_S390_VM_CPU_PROCESSOR:
1720 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001721 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1722 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001723 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001724 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001725 ret = 0;
1726 break;
1727 default:
1728 ret = -ENXIO;
1729 break;
1730 }
1731 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001732 case KVM_S390_VM_CRYPTO:
1733 switch (attr->attr) {
1734 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1735 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1736 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1737 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1738 ret = 0;
1739 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001740 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1741 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1742 ret = ap_instructions_available() ? 0 : -ENXIO;
1743 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001744 default:
1745 ret = -ENXIO;
1746 break;
1747 }
1748 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001749 case KVM_S390_VM_MIGRATION:
1750 ret = 0;
1751 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001752 default:
1753 ret = -ENXIO;
1754 break;
1755 }
1756
1757 return ret;
1758}
1759
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001760static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1761{
1762 uint8_t *keys;
1763 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001764 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001765
1766 if (args->flags != 0)
1767 return -EINVAL;
1768
1769 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001770 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001771 return KVM_S390_GET_SKEYS_NONE;
1772
1773 /* Enforce sane limit on memory allocation */
1774 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1775 return -EINVAL;
1776
Michal Hocko752ade62017-05-08 15:57:27 -07001777 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001778 if (!keys)
1779 return -ENOMEM;
1780
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001781 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001782 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001783 for (i = 0; i < args->count; i++) {
1784 hva = gfn_to_hva(kvm, args->start_gfn + i);
1785 if (kvm_is_error_hva(hva)) {
1786 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001787 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001788 }
1789
David Hildenbrand154c8c12016-05-09 11:22:34 +02001790 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1791 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001792 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001793 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001794 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001795 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001796
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001797 if (!r) {
1798 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1799 sizeof(uint8_t) * args->count);
1800 if (r)
1801 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001802 }
1803
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001804 kvfree(keys);
1805 return r;
1806}
1807
1808static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1809{
1810 uint8_t *keys;
1811 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001812 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001813 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001814
1815 if (args->flags != 0)
1816 return -EINVAL;
1817
1818 /* Enforce sane limit on memory allocation */
1819 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1820 return -EINVAL;
1821
Michal Hocko752ade62017-05-08 15:57:27 -07001822 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001823 if (!keys)
1824 return -ENOMEM;
1825
1826 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1827 sizeof(uint8_t) * args->count);
1828 if (r) {
1829 r = -EFAULT;
1830 goto out;
1831 }
1832
1833 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001834 r = s390_enable_skey();
1835 if (r)
1836 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001837
Janosch Frankbd096f62018-07-18 13:40:22 +01001838 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001839 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001840 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001841 while (i < args->count) {
1842 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001843 hva = gfn_to_hva(kvm, args->start_gfn + i);
1844 if (kvm_is_error_hva(hva)) {
1845 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001846 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001847 }
1848
1849 /* Lowest order bit is reserved */
1850 if (keys[i] & 0x01) {
1851 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001852 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001853 }
1854
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001855 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001856 if (r) {
1857 r = fixup_user_fault(current, current->mm, hva,
1858 FAULT_FLAG_WRITE, &unlocked);
1859 if (r)
1860 break;
1861 }
1862 if (!r)
1863 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001864 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001865 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001866 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001867out:
1868 kvfree(keys);
1869 return r;
1870}
1871
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001872/*
1873 * The base address and length must be sent at the start of each block; it is
1874 * therefore cheaper to send some clean data, as long as the run of clean
1875 * values is shorter than two longs.
1876 */
1877#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
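/* on 64-bit, up to 16 clean values may be sent before a new block pays off */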
1878/* for consistency */
1879#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1880
1881/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001882 * Similar to gfn_to_memslot, but returns the index of a memslot even when the
1883 * address falls in a hole. In that case, the index of one of the memslots
1884 * bordering the hole is returned.
1885 */
1886static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1887{
1888 int start = 0, end = slots->used_slots;
1889 int slot = atomic_read(&slots->lru_slot);
1890 struct kvm_memory_slot *memslots = slots->memslots;
1891
1892 if (gfn >= memslots[slot].base_gfn &&
1893 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1894 return slot;
1895
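	/*
	 * memslots are sorted by base_gfn in descending order, so this
	 * binary search finds the first slot whose base_gfn is <= gfn.
	 */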
1896 while (start < end) {
1897 slot = start + (end - start) / 2;
1898
1899 if (gfn >= memslots[slot].base_gfn)
1900 end = slot;
1901 else
1902 start = slot + 1;
1903 }
1904
1905 if (gfn >= memslots[start].base_gfn &&
1906 gfn < memslots[start].base_gfn + memslots[start].npages) {
1907 atomic_set(&slots->lru_slot, start);
1908 }
1909
1910 return start;
1911}
1912
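/*
 * Read the current CMMA values for bufsize consecutive guest frames, without
 * looking at or changing the per-page dirty bits (the KVM_S390_CMMA_PEEK case).
 */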
1913static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1914 u8 *res, unsigned long bufsize)
1915{
1916 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1917
1918 args->count = 0;
1919 while (args->count < bufsize) {
1920 hva = gfn_to_hva(kvm, cur_gfn);
1921 /*
1922	 * We return an error if the translation fails already for the first
1923	 * page, but we return successfully once at least one value was copied.
1924 */
1925 if (kvm_is_error_hva(hva))
1926 return args->count ? 0 : -EFAULT;
1927 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1928 pgstev = 0;
1929 res[args->count++] = (pgstev >> 24) & 0x43;
1930 cur_gfn++;
1931 }
1932
1933 return 0;
1934}
1935
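/*
 * Return the guest frame number of the next page with the CMMA dirty bit set,
 * at or after cur_gfn; the search wraps around once it runs past the slot
 * covering the highest guest frames. A result outside every memslot means
 * that no dirty bit is set at all.
 */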
1936static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1937 unsigned long cur_gfn)
1938{
1939 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1940 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1941 unsigned long ofs = cur_gfn - ms->base_gfn;
1942
1943 if (ms->base_gfn + ms->npages <= cur_gfn) {
1944 slotidx--;
1945 /* If we are above the highest slot, wrap around */
1946 if (slotidx < 0)
1947 slotidx = slots->used_slots - 1;
1948
1949 ms = slots->memslots + slotidx;
1950 ofs = 0;
1951 }
1952 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1953 while ((slotidx > 0) && (ofs >= ms->npages)) {
1954 slotidx--;
1955 ms = slots->memslots + slotidx;
1956 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1957 }
1958 return ms->base_gfn + ofs;
1959}
1960
1961static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1962 u8 *res, unsigned long bufsize)
1963{
1964 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1965 struct kvm_memslots *slots = kvm_memslots(kvm);
1966 struct kvm_memory_slot *ms;
1967
1968 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1969 ms = gfn_to_memslot(kvm, cur_gfn);
1970 args->count = 0;
1971 args->start_gfn = cur_gfn;
1972 if (!ms)
1973 return 0;
1974 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
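	/* slot 0 covers the highest guest frames, so this is the end of memory */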
1975 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
1976
1977 while (args->count < bufsize) {
1978 hva = gfn_to_hva(kvm, cur_gfn);
1979 if (kvm_is_error_hva(hva))
1980 return 0;
1981 /* Decrement only if we actually flipped the bit to 0 */
1982 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
1983 atomic64_dec(&kvm->arch.cmma_dirty_pages);
1984 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1985 pgstev = 0;
1986 /* Save the value */
1987 res[args->count++] = (pgstev >> 24) & 0x43;
1988 /* If the next bit is too far away, stop. */
1989 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
1990 return 0;
1991 /* If we reached the previous "next", find the next one */
1992 if (cur_gfn == next_gfn)
1993 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1994 /* Reached the end of memory or of the buffer, stop */
1995 if ((next_gfn >= mem_end) ||
1996 (next_gfn - args->start_gfn >= bufsize))
1997 return 0;
1998 cur_gfn++;
1999 /* Reached the end of the current memslot, take the next one. */
2000 if (cur_gfn - ms->base_gfn >= ms->npages) {
2001 ms = gfn_to_memslot(kvm, cur_gfn);
2002 if (!ms)
2003 return 0;
2004 }
2005 }
2006 return 0;
2007}
2008
2009/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002010 * This function searches for the next page with dirty CMMA attributes, and
2011 * saves the attributes in the buffer up to either the end of the buffer or
2012 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2013 * no trailing clean bytes are saved.
2014 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2015 * output buffer will indicate a length of 0.
2016 */
2017static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2018 struct kvm_s390_cmma_log *args)
2019{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002020 unsigned long bufsize;
2021 int srcu_idx, peek, ret;
2022 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002023
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002024 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002025 return -ENXIO;
2026 /* Invalid/unsupported flags were specified */
2027 if (args->flags & ~KVM_S390_CMMA_PEEK)
2028 return -EINVAL;
2029	/* Reading the log without PEEK is only valid while in migration mode */
2030 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002031 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002032 return -EINVAL;
2033 /* CMMA is disabled or was not used, or the buffer has length zero */
2034 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002035 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002036 memset(args, 0, sizeof(*args));
2037 return 0;
2038 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002039 /* We are not peeking, and there are no dirty pages */
2040 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2041 memset(args, 0, sizeof(*args));
2042 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002043 }
2044
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002045 values = vmalloc(bufsize);
2046 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002047 return -ENOMEM;
2048
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002049 down_read(&kvm->mm->mmap_sem);
2050 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002051 if (peek)
2052 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2053 else
2054 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002055 srcu_read_unlock(&kvm->srcu, srcu_idx);
2056 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002057
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002058 if (kvm->arch.migration_mode)
2059 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2060 else
2061 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002062
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002063 if (copy_to_user((void __user *)args->values, values, args->count))
2064 ret = -EFAULT;
2065
2066 vfree(values);
2067 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002068}
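
/*
 * For illustration only - a minimal userspace sketch of one read of the CMMA
 * log (not part of this file; "vm_fd" is assumed to be an open VM file
 * descriptor and "buf" to point to at least 4096 bytes):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = 4096,
 *		.flags = 0,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	rc = ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *
 * On success, log.count holds the number of values written, log.start_gfn the
 * frame of the first value, and log.remaining an estimate of the dirty pages
 * left; the next call would continue at log.start_gfn + log.count.
 */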
2069
2070/*
2071 * This function sets the CMMA attributes for the given pages. If the input
2072 * buffer has zero length, no action is taken; otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002073 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002074 */
2075static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2076 const struct kvm_s390_cmma_log *args)
2077{
2078 unsigned long hva, mask, pgstev, i;
2079 uint8_t *bits;
2080 int srcu_idx, r = 0;
2081
2082 mask = args->mask;
2083
2084 if (!kvm->arch.use_cmma)
2085 return -ENXIO;
2086 /* invalid/unsupported flags */
2087 if (args->flags != 0)
2088 return -EINVAL;
2089 /* Enforce sane limit on memory allocation */
2090 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2091 return -EINVAL;
2092 /* Nothing to do */
2093 if (args->count == 0)
2094 return 0;
2095
Kees Cook42bc47b2018-06-12 14:27:11 -07002096 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002097 if (!bits)
2098 return -ENOMEM;
2099
2100 r = copy_from_user(bits, (void __user *)args->values, args->count);
2101 if (r) {
2102 r = -EFAULT;
2103 goto out;
2104 }
2105
2106 down_read(&kvm->mm->mmap_sem);
2107 srcu_idx = srcu_read_lock(&kvm->srcu);
2108 for (i = 0; i < args->count; i++) {
2109 hva = gfn_to_hva(kvm, args->start_gfn + i);
2110 if (kvm_is_error_hva(hva)) {
2111 r = -EFAULT;
2112 break;
2113 }
2114
2115 pgstev = bits[i];
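		/* shift the value into its PGSTE position; the mask limits what may change */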
2116 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002117 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002118 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2119 }
2120 srcu_read_unlock(&kvm->srcu, srcu_idx);
2121 up_read(&kvm->mm->mmap_sem);
2122
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002123 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002124 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002125 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002126 up_write(&kvm->mm->mmap_sem);
2127 }
2128out:
2129 vfree(bits);
2130 return r;
2131}
2132
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002133long kvm_arch_vm_ioctl(struct file *filp,
2134 unsigned int ioctl, unsigned long arg)
2135{
2136 struct kvm *kvm = filp->private_data;
2137 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002138 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002139 int r;
2140
2141 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002142 case KVM_S390_INTERRUPT: {
2143 struct kvm_s390_interrupt s390int;
2144
2145 r = -EFAULT;
2146 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2147 break;
2148 r = kvm_s390_inject_vm(kvm, &s390int);
2149 break;
2150 }
Cornelia Huck84223592013-07-15 13:36:01 +02002151 case KVM_CREATE_IRQCHIP: {
2152 struct kvm_irq_routing_entry routing;
2153
2154 r = -EINVAL;
2155 if (kvm->arch.use_irqchip) {
2156 /* Set up dummy routing. */
2157 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002158 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002159 }
2160 break;
2161 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002162 case KVM_SET_DEVICE_ATTR: {
2163 r = -EFAULT;
2164 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2165 break;
2166 r = kvm_s390_vm_set_attr(kvm, &attr);
2167 break;
2168 }
2169 case KVM_GET_DEVICE_ATTR: {
2170 r = -EFAULT;
2171 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2172 break;
2173 r = kvm_s390_vm_get_attr(kvm, &attr);
2174 break;
2175 }
2176 case KVM_HAS_DEVICE_ATTR: {
2177 r = -EFAULT;
2178 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2179 break;
2180 r = kvm_s390_vm_has_attr(kvm, &attr);
2181 break;
2182 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002183 case KVM_S390_GET_SKEYS: {
2184 struct kvm_s390_skeys args;
2185
2186 r = -EFAULT;
2187 if (copy_from_user(&args, argp,
2188 sizeof(struct kvm_s390_skeys)))
2189 break;
2190 r = kvm_s390_get_skeys(kvm, &args);
2191 break;
2192 }
2193 case KVM_S390_SET_SKEYS: {
2194 struct kvm_s390_skeys args;
2195
2196 r = -EFAULT;
2197 if (copy_from_user(&args, argp,
2198 sizeof(struct kvm_s390_skeys)))
2199 break;
2200 r = kvm_s390_set_skeys(kvm, &args);
2201 break;
2202 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002203 case KVM_S390_GET_CMMA_BITS: {
2204 struct kvm_s390_cmma_log args;
2205
2206 r = -EFAULT;
2207 if (copy_from_user(&args, argp, sizeof(args)))
2208 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002209 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002210 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002211 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002212 if (!r) {
2213 r = copy_to_user(argp, &args, sizeof(args));
2214 if (r)
2215 r = -EFAULT;
2216 }
2217 break;
2218 }
2219 case KVM_S390_SET_CMMA_BITS: {
2220 struct kvm_s390_cmma_log args;
2221
2222 r = -EFAULT;
2223 if (copy_from_user(&args, argp, sizeof(args)))
2224 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002225 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002226 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002227 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002228 break;
2229 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002230 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002231 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002232 }
2233
2234 return r;
2235}
2236
Tony Krowiak45c9b472015-01-13 11:33:26 -05002237static int kvm_s390_apxa_installed(void)
2238{
Tony Krowiake585b242018-09-25 19:16:18 -04002239 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002240
Tony Krowiake585b242018-09-25 19:16:18 -04002241 if (ap_instructions_available()) {
2242 if (ap_qci(&info) == 0)
2243 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002244 }
2245
2246 return 0;
2247}
2248
Tony Krowiake585b242018-09-25 19:16:18 -04002249/*
2250 * The format of the crypto control block (CRYCB) is specified in the 3 low
2251 * order bits of the CRYCB designation (CRYCBD) field as follows:
2252 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2253 * AP extended addressing (APXA) facility is installed.
2254 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2255 * Format 2: Both the APXA and MSAX3 facilities are installed.
2256 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002257static void kvm_s390_set_crycb_format(struct kvm *kvm)
2258{
2259 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2260
Tony Krowiake585b242018-09-25 19:16:18 -04002261 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2262 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2263
2264 /* Check whether MSAX3 is installed */
2265 if (!test_kvm_facility(kvm, 76))
2266 return;
2267
Tony Krowiak45c9b472015-01-13 11:33:26 -05002268 if (kvm_s390_apxa_installed())
2269 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2270 else
2271 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2272}
2273
Pierre Morel0e237e42018-10-05 10:31:09 +02002274void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2275 unsigned long *aqm, unsigned long *adm)
2276{
2277 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2278
2279 mutex_lock(&kvm->lock);
2280 kvm_s390_vcpu_block_all(kvm);
2281
2282 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2283	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2284 memcpy(crycb->apcb1.apm, apm, 32);
2285 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2286 apm[0], apm[1], apm[2], apm[3]);
2287 memcpy(crycb->apcb1.aqm, aqm, 32);
2288 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2289 aqm[0], aqm[1], aqm[2], aqm[3]);
2290 memcpy(crycb->apcb1.adm, adm, 32);
2291 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2292 adm[0], adm[1], adm[2], adm[3]);
2293 break;
2294 case CRYCB_FORMAT1:
2295	case CRYCB_FORMAT0: /* fall through - both formats use APCB0 */
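		/* APCB0 holds a 64-bit AP mask and 16-bit queue/domain masks */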
2296 memcpy(crycb->apcb0.apm, apm, 8);
2297 memcpy(crycb->apcb0.aqm, aqm, 2);
2298 memcpy(crycb->apcb0.adm, adm, 2);
2299 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2300 apm[0], *((unsigned short *)aqm),
2301 *((unsigned short *)adm));
2302 break;
2303	default:	/* Cannot happen */
2304 break;
2305 }
2306
2307 /* recreate the shadow crycb for each vcpu */
2308 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2309 kvm_s390_vcpu_unblock_all(kvm);
2310 mutex_unlock(&kvm->lock);
2311}
2312EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2313
Tony Krowiak421045982018-09-25 19:16:25 -04002314void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2315{
2316 mutex_lock(&kvm->lock);
2317 kvm_s390_vcpu_block_all(kvm);
2318
2319 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2320 sizeof(kvm->arch.crypto.crycb->apcb0));
2321 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2322 sizeof(kvm->arch.crypto.crycb->apcb1));
2323
Pierre Morel0e237e42018-10-05 10:31:09 +02002324 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002325 /* recreate the shadow crycb for each vcpu */
2326 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002327 kvm_s390_vcpu_unblock_all(kvm);
2328 mutex_unlock(&kvm->lock);
2329}
2330EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2331
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002332static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002333{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002334 struct cpuid cpuid;
2335
2336 get_cpu_id(&cpuid);
2337 cpuid.version = 0xff;
2338 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002339}
2340
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002341static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002342{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002343 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002344 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002345
Tony Krowiake585b242018-09-25 19:16:18 -04002346 if (!test_kvm_facility(kvm, 76))
2347 return;
2348
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002349 /* Enable AES/DEA protected key functions by default */
2350 kvm->arch.crypto.aes_kw = 1;
2351 kvm->arch.crypto.dea_kw = 1;
2352 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2353 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2354 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2355 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002356}
2357
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002358static void sca_dispose(struct kvm *kvm)
2359{
2360 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002361 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002362 else
2363 free_page((unsigned long)(kvm->arch.sca));
2364 kvm->arch.sca = NULL;
2365}
2366
Carsten Ottee08b9632012-01-04 10:25:20 +01002367int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002368{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002369 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002370 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002371 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002372 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002373
Carsten Ottee08b9632012-01-04 10:25:20 +01002374 rc = -EINVAL;
2375#ifdef CONFIG_KVM_S390_UCONTROL
2376 if (type & ~KVM_VM_S390_UCONTROL)
2377 goto out_err;
2378 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2379 goto out_err;
2380#else
2381 if (type)
2382 goto out_err;
2383#endif
2384
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002385 rc = s390_enable_sie();
2386 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002387 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002388
Carsten Otteb2904112011-10-18 12:27:13 +02002389 rc = -ENOMEM;
2390
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002391 if (!sclp.has_64bscao)
2392 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002393 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002394 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002395 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002397 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002398 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002399 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002400 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002401 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002402 kvm->arch.sca = (struct bsca_block *)
2403 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002404 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002405
2406 sprintf(debug_name, "kvm-%u", current->pid);
2407
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002408 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002409 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002410 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002411
Michael Mueller19114be2017-05-30 14:26:02 +02002412 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002413 kvm->arch.sie_page2 =
2414 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2415 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002416 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002417
Michael Mueller25c84db2019-01-31 09:52:41 +01002418 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002419 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002420
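	/*
	 * fac_mask holds every facility KVM can offer on this host, fac_list
	 * the set the guest CPU model starts out with.
	 */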
2421 for (i = 0; i < kvm_s390_fac_size(); i++) {
2422 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2423 (kvm_s390_fac_base[i] |
2424 kvm_s390_fac_ext[i]);
2425 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2426 kvm_s390_fac_base[i];
2427 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002428 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002429
David Hildenbrand19352222017-08-29 16:31:08 +02002430 /* we are always in czam mode - even on pre z14 machines */
2431 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2432 set_kvm_facility(kvm->arch.model.fac_list, 138);
2433 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002434 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2435 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002436 if (MACHINE_HAS_TLB_GUEST) {
2437 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2438 set_kvm_facility(kvm->arch.model.fac_list, 147);
2439 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002440
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002441 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002442 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002443
	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kvm_s390_gisa_destroy(kvm);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

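/*
 * Replace the basic SCA (64 CPU slots) by the extended SCA.  All VCPUs
 * are blocked while their SIE blocks are rewired to the new SCA, so no
 * CPU can be inside SIE with a stale scaoh/scaol pair.
 */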
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
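	/*
	 * Tell user space which register sets are synchronized via the
	 * kvm_run structure; the optional sets below depend on facilities
	 * of the configured CPU model.
	 */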
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

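/*
 * The guest CPU timer is advanced by hardware while the VCPU runs in
 * SIE.  cputm_start remembers when accounting began, and cputm_seqcount
 * lets other threads compute an up-to-date value without stopping the
 * VCPU (see kvm_s390_get_cpu_timer()).
 */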
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
					CR0_INTERRUPT_KEY_SUBMASK |
					CR0_MEASUREMENT_ALERT_SUBMASK;
	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
					CR14_UNUSED_33 |
					CR14_EXTERNAL_DAMAGE_SUBMASK;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

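/*
 * Runs once the VCPU file descriptor exists: inherit the VM-wide TOD
 * epoch under lock and, for non-ucontrol VMs, hook the VCPU into the
 * shared gmap and the SCA.
 */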
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

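/*
 * cbrlo points to a zeroed page that the machine fills during
 * interpreted ESSA execution with the addresses of guest pages whose
 * CMMA state changed, for KVM to post-process on intercept.
 */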
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

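/*
 * Wire up the execution controls for a new VCPU: most of the ECB/ECA/ECD
 * bits below enable in-SIE handling of a function that both the host
 * offers and the configured CPU model allows, avoiding an exit to KVM.
 */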
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
				      | SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

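/*
 * ONE_REG gives user space access to selected SIE block fields and to
 * the pfault state, e.g. for migrating registers that are not covered
 * by the kvm_run synchronization.  A user-space caller would do
 * something like (illustrative only):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64) &value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */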
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

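/*
 * Process pending VCPU requests before (re-)entering SIE.  Requests
 * raised by other threads first kick the VCPU out of SIE (see
 * kvm_s390_sync_request()), so the request is guaranteed to be seen
 * here before the next SIE entry.
 */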
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

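	/*
	 * The guest epoch is the (wrapping) difference guest TOD - host
	 * TOD.  With the multiple-epoch facility (139), a borrow out of
	 * the 64-bit subtraction has to be propagated into the epoch
	 * index as well.
	 */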
	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

3576void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3577 struct kvm_async_pf *work)
3578{
3579 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3580 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3581}
3582
3583void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3584 struct kvm_async_pf *work)
3585{
3586 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3587 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3588}
3589
3590void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3591 struct kvm_async_pf *work)
3592{
3593 /* s390 will always inject the page directly */
3594}
3595
3596bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3597{
3598 /*
3599 * s390 will always inject the page directly,
3600 * but we still want check_async_completion to cleanup
3601 */
3602 return true;
3603}
3604
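/*
 * Set up an async page fault for the current guest access, but only if the
 * pfault handshake is enabled and usable right now: the token must be valid,
 * the PSW must match the fault-select mask, external interrupts and the
 * service-signal subclass must be enabled, no interrupt may already be
 * pending, and pfault must be enabled on the gmap. Otherwise return 0 and
 * let the caller fall back to synchronous fault-in.
 */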
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

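/*
 * Prepare a VCPU for one round trip through SIE: process completed async
 * page faults, sync the gpr 14/15 shadows, handle pending machine checks,
 * deliver pending interrupts and requests, and set up guest debugging if
 * enabled.
 */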
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390, notifications for arriving pages will be delivered
	 * directly to the guest, but the housekeeping for completed pfaults
	 * is handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/*
		 * Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

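/*
 * sync_regs() and store_regs() implement the register synchronization
 * protocol with userspace: before KVM_RUN, userspace flags the pieces of
 * kvm_run->s.regs it has modified in kvm_run->kvm_dirty_regs (KVM_SYNC_*
 * bits); sync_regs() folds those into the SIE control block and loads the
 * guest FPU/access register state, while store_regs() writes the guest
 * state back into kvm_run after the run loop ends.
 */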
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

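/*
 * IBS handling: the IBS facility is only enabled while exactly one VCPU is
 * running, as a fast path for that single runner. The helpers below queue
 * the corresponding ENABLE/DISABLE requests on the VCPUs; starting a second
 * VCPU revokes IBS on all VCPUs again (see kvm_s390_vcpu_start/stop below).
 */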
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

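/*
 * Illustrative userspace invocation of KVM_S390_MEM_OP (a sketch only, not
 * part of this file; vcpu_fd, buffer and len are placeholder names and
 * error handling is elided):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_logical_addr,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		handle_error();
 */
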
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

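/*
 * Note that the async ioctl path above is entered without taking the vcpu
 * mutex (no vcpu_load()), so interrupt injection does not have to wait for
 * the VCPU thread to leave KVM_RUN; all other ioctls fall through to the
 * regular, vcpu_load()-protected handler below.
 */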
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

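/*
 * For user-controlled VMs the fault handler above lets userspace mmap() the
 * SIE control block of a VCPU at page offset KVM_S390_SIE_PAGE_OFFSET. An
 * illustrative sketch of the userspace side (vcpu_fd and page_size are
 * placeholders, not defined here):
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 */
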
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The memory in userland may be fragmented
	 * into various different vmas, and it is fine to mmap() and munmap()
	 * within this slot at any time after this call.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

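/*
 * Worked example for nonhyp_mask(), arithmetic only: with i == 0 and
 * sclp.hmfai == 0x40000000, the extracted 2-bit field is
 * (0x40000000 << 0) >> 30 == 1, so the returned mask is
 * 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL. Each increment of
 * the 2-bit field thus drops another 16 facility bits from the mask.
 */
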
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");