// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

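/*
 * Returns SIZE_INTERNAL, while letting the compiler verify that the
 * internal facility arrays fit within the architected mask/list sizes
 * and within the lowcore stfle field.
 */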
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

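/*
 * Adjust the guest epoch in a SIE control block after the host TOD clock
 * jumped by "delta", carrying into the epoch index (epdx) when the
 * multiple-epoch facility (ECD_MEF) is in use.
 */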
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

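/*
 * Determine which optional facilities, crypto subfunctions and PTFF
 * subfunctions the host machine offers, so they can later be exposed
 * to guests via the cpu model attributes.
 */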
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

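/*
 * Sync the dirty log for a memory slot: walk the slot in segment-sized
 * (_PAGE_ENTRIES) steps, collect the dirty bits from the gmap and mark
 * the corresponding guest frames dirty in the KVM dirty bitmap.
 */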
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

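/*
 * Request an operation-exception intercept on all vcpus; used when
 * userspace enables KVM_CAP_S390_USER_INSTR0 (see
 * kvm_vm_ioctl_enable_cap() below) to handle these instructions itself.
 */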
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

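/*
 * Reconfigure the crypto settings of all existing vcpus after the VM-wide
 * crypto attributes changed; the vcpus are blocked and kicked out of the
 * VSIE so that shadow CRYCBs are rebuilt with the new settings.
 */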
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

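/* Make the same synchronous request to all vcpus of the VM. */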
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

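/*
 * Compute the guest view of the TOD clock from the host clock: the TOD
 * base plus the per-VM epoch, and, with the multiple-epoch facility
 * (139), the epoch index including a possible carry.
 */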
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

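/*
 * Set the guest CPU model: cpuid, IBC and facility list. If the host
 * reports IBC support, the requested IBC value is clamped to the range
 * SCLP advertises. Only possible while no VCPUs have been created.
 */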
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

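/*
 * Set the guest CPU feature bitmap. The requested features must be a
 * subset of what the host offers, and like the rest of the CPU model
 * this can only be changed before the first VCPU is created.
 */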
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

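/*
 * Set the guest's view of the installed instruction subfunctions (PLO,
 * PTFF and the MSA crypto functions). The query results are copied
 * verbatim from user space; only possible before the first VCPU exists.
 */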
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

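/*
 * kvm_s390_vm_set_attr() backs the KVM_SET_DEVICE_ATTR VM ioctl. As a
 * hedged illustration only (not part of this file; "tod" stands in for
 * a hypothetical u64 in user space), an attribute is selected by group
 * and attr and routed to one of the handlers above:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr = KVM_S390_VM_TOD_LOW,
 *		.addr = (__u64)(unsigned long)&tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which ends up in kvm_s390_set_tod_low(). Support for an attribute can
 * be probed first with KVM_HAS_DEVICE_ATTR (see kvm_s390_vm_has_attr()).
 */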
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

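/*
 * Read the guest's storage keys into a user buffer. Returns
 * KVM_S390_GET_SKEYS_NONE when the guest has never used storage keys,
 * so user space can skip the transfer entirely.
 */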
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

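/*
 * Write user-supplied storage keys into the guest. Storage key handling
 * is enabled lazily on first use; a failing set_guest_storage_key() on a
 * not-yet-mapped page is resolved via fixup_user_fault() and retried.
 */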
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

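/*
 * Return the GFN of the next page at or after cur_gfn with a set bit in
 * the CMMA dirty bitmap. The memslots array is sorted by descending
 * base_gfn, so walking towards higher guest addresses means decrementing
 * the slot index; if no bit is found, the returned GFN lies past the end
 * of the last slot searched.
 */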
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

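/*
 * Harvest dirty CMMA attributes: starting at the first dirty page at or
 * after args->start_gfn, copy one attribute byte per page until the
 * buffer is full, the end of memory is reached, or the gap to the next
 * dirty page exceeds KVM_S390_MAX_BIT_DISTANCE.
 */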
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

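/*
 * VM ioctl dispatcher: floating interrupt injection, irqchip creation,
 * VM device attributes, storage key transfer and the CMMA migration log.
 */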
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

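/*
 * Derive the default guest cpuid from the host cpuid, but override the
 * version byte with 0xff.
 */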
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

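/*
 * Create the architecture specific parts of a VM: the (basic) SCA at a
 * randomized page offset, the s390 debug feature, the CPU model defaults,
 * crypto setup, the floating interrupt state and, for non-ucontrol VMs,
 * the gmap that backs the guest address space.
 */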
Carsten Ottee08b9632012-01-04 10:25:20 +01002317int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002318{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002319 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002320 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002321 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002322 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002323
Carsten Ottee08b9632012-01-04 10:25:20 +01002324 rc = -EINVAL;
2325#ifdef CONFIG_KVM_S390_UCONTROL
2326 if (type & ~KVM_VM_S390_UCONTROL)
2327 goto out_err;
2328 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2329 goto out_err;
2330#else
2331 if (type)
2332 goto out_err;
2333#endif
2334
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002335 rc = s390_enable_sie();
2336 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002337 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002338
Carsten Otteb2904112011-10-18 12:27:13 +02002339 rc = -ENOMEM;
2340
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002341 if (!sclp.has_64bscao)
2342 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002343 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002344 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002345 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002346 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002347 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002348 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002349 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002350 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002351 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002352 kvm->arch.sca = (struct bsca_block *)
2353 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002354 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002355
2356 sprintf(debug_name, "kvm-%u", current->pid);
2357
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002358 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002359 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002360 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002361
Michael Mueller19114be2017-05-30 14:26:02 +02002362 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002363 kvm->arch.sie_page2 =
2364 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2365 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002366 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002367
Michael Mueller25c84db2019-01-31 09:52:41 +01002368 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002369 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002370
2371 for (i = 0; i < kvm_s390_fac_size(); i++) {
2372 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2373 (kvm_s390_fac_base[i] |
2374 kvm_s390_fac_ext[i]);
2375 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2376 kvm_s390_fac_base[i];
2377 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002378 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002379
David Hildenbrand19352222017-08-29 16:31:08 +02002380 /* we are always in czam mode - even on pre z14 machines */
2381 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2382 set_kvm_facility(kvm->arch.model.fac_list, 138);
2383 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002384 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2385 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002386 if (MACHINE_HAS_TLB_GUEST) {
2387 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2388 set_kvm_facility(kvm->arch.model.fac_list, 147);
2389 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002390
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002391 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002392 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002393
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002394 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002395
Fei Li51978392017-02-17 17:06:26 +08002396 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002397 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002398 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2399 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002400 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002401 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002402
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002403 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002404 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002405
Carsten Ottee08b9632012-01-04 10:25:20 +01002406 if (type & KVM_VM_S390_UCONTROL) {
2407 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002408 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002409 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002410 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002411 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002412 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002413 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002414 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002415 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002416 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002417 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002418 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002419 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002420 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002421
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002422 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002423 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002424 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002425 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002426 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002427 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002428
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002429 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002430out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002431 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002432 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002433 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002434 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002435 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002436}
2437
Luiz Capitulino235539b2016-09-07 14:47:23 -04002438bool kvm_arch_has_vcpu_debugfs(void)
2439{
2440 return false;
2441}
2442
2443int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2444{
2445 return 0;
2446}
2447
Christian Borntraegerd329c032008-11-26 14:50:27 +01002448void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2449{
2450 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002451 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002452 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002453 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002454 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002455 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002456
2457 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002458 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002459
Dominik Dingele6db1d62015-05-07 15:41:57 +02002460 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002461 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002462 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002463
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002464 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002465 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002466}
2467
2468static void kvm_free_vcpus(struct kvm *kvm)
2469{
2470 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002471 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002472
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002473 kvm_for_each_vcpu(i, vcpu, kvm)
2474 kvm_arch_vcpu_destroy(vcpu);
2475
2476 mutex_lock(&kvm->lock);
2477 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2478 kvm->vcpus[i] = NULL;
2479
2480 atomic_set(&kvm->online_vcpus, 0);
2481 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002482}
2483
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002484void kvm_arch_destroy_vm(struct kvm *kvm)
2485{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002486 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002487 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002488 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002489 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002490 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002491 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002492 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002493 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002494 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002495 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002496 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002497}
2498
2499/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002500static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2501{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002502 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002503 if (!vcpu->arch.gmap)
2504 return -ENOMEM;
2505 vcpu->arch.gmap->private = vcpu->kvm;
2506
2507 return 0;
2508}
2509
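/*
 * Drop the VCPU's entry from the SCA: clear its bit in the MCN mask and
 * zero its SDA pointer, in either the basic or the extended SCA layout.
 * The read lock only excludes a concurrent BSCA -> ESCA switch.
 */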
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002510static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2511{
David Hildenbranda6940672016-08-08 22:39:32 +02002512 if (!kvm_s390_use_sca_entries())
2513 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002514 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002515 if (vcpu->kvm->arch.use_esca) {
2516 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002517
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002518 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002519 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002520 } else {
2521 struct bsca_block *sca = vcpu->kvm->arch.sca;
2522
2523 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002524 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002525 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002526 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002527}
2528
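/*
 * Publish the VCPU's SIE block in the SCA. The SCA origin is split
 * across scaoh (high word) and scaol (low word); for the extended
 * format the low 6 bits of scaol are masked off and ECB2_ESCA is set.
 */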
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002529static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002530{
David Hildenbranda6940672016-08-08 22:39:32 +02002531 if (!kvm_s390_use_sca_entries()) {
2532 struct bsca_block *sca = vcpu->kvm->arch.sca;
2533
2534 /* we still need the basic sca for the ipte control */
2535 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2536 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002537 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002538 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002539 read_lock(&vcpu->kvm->arch.sca_lock);
2540 if (vcpu->kvm->arch.use_esca) {
2541 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002542
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002543 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002544 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2545 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002546 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002547 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002548 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002549 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002550
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002551 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002552 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2553 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002554 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002555 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002556 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002557}
2558
2559/* Basic SCA to Extended SCA data copy routines */
2560static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2561{
2562 d->sda = s->sda;
2563 d->sigp_ctrl.c = s->sigp_ctrl.c;
2564 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2565}
2566
2567static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2568{
2569 int i;
2570
2571 d->ipte_control = s->ipte_control;
2572 d->mcn[0] = s->mcn;
2573 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2574 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2575}
2576
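/*
 * Switch the VM from the basic SCA to the extended SCA at runtime.
 * All VCPUs are blocked and the sca_lock is held for writing while
 * their scaoh/scaol pointers are rewritten, so no VCPU can enter SIE
 * with a stale SCA origin.
 */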
2577static int sca_switch_to_extended(struct kvm *kvm)
2578{
2579 struct bsca_block *old_sca = kvm->arch.sca;
2580 struct esca_block *new_sca;
2581 struct kvm_vcpu *vcpu;
2582 unsigned int vcpu_idx;
2583 u32 scaol, scaoh;
2584
2585 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2586 if (!new_sca)
2587 return -ENOMEM;
2588
2589 scaoh = (u32)((u64)(new_sca) >> 32);
2590 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2591
2592 kvm_s390_vcpu_block_all(kvm);
2593 write_lock(&kvm->arch.sca_lock);
2594
2595 sca_copy_b_to_e(new_sca, old_sca);
2596
2597 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2598 vcpu->arch.sie_block->scaoh = scaoh;
2599 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002600 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002601 }
2602 kvm->arch.sca = new_sca;
2603 kvm->arch.use_esca = 1;
2604
2605 write_unlock(&kvm->arch.sca_lock);
2606 kvm_s390_vcpu_unblock_all(kvm);
2607
2608 free_page((unsigned long)old_sca);
2609
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002610 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2611 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002612 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002613}
2614
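/*
 * Check whether a VCPU id still fits into the SCA. If the basic SCA is
 * exhausted and the machine provides ESCA (and 64-bit SCA origins),
 * switch to the extended SCA on demand.
 */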
2615static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2616{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002617 int rc;
2618
David Hildenbranda6940672016-08-08 22:39:32 +02002619 if (!kvm_s390_use_sca_entries()) {
2620 if (id < KVM_MAX_VCPUS)
2621 return true;
2622 return false;
2623 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002624 if (id < KVM_S390_BSCA_CPU_SLOTS)
2625 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002626 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002627 return false;
2628
2629 mutex_lock(&kvm->lock);
2630 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2631 mutex_unlock(&kvm->lock);
2632
2633 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002634}
2635
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002636int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2637{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002638 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2639 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002640 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2641 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002642 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002643 KVM_SYNC_CRS |
2644 KVM_SYNC_ARCH0 |
2645 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002646 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002647 if (test_kvm_facility(vcpu->kvm, 64))
2648 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002649 if (test_kvm_facility(vcpu->kvm, 82))
2650 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002651 if (test_kvm_facility(vcpu->kvm, 133))
2652 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002653 if (test_kvm_facility(vcpu->kvm, 156))
2654 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002655 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2656 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2657 */
2658 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002659 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002660 else
2661 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002662
2663 if (kvm_is_ucontrol(vcpu->kvm))
2664 return __kvm_ucontrol_vcpu_init(vcpu);
2665
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002666 return 0;
2667}
2668
David Hildenbranddb0758b2016-02-15 09:42:25 +01002669/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2670static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2671{
2672 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002673 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002674 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002675 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002676}
2677
2678/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2679static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2680{
2681 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002682 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002683 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2684 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002685 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002686}
2687
2688/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2689static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2690{
2691 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2692 vcpu->arch.cputm_enabled = true;
2693 __start_cpu_timer_accounting(vcpu);
2694}
2695
2696/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2697static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2698{
2699 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2700 __stop_cpu_timer_accounting(vcpu);
2701 vcpu->arch.cputm_enabled = false;
2702}
2703
2704static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2705{
2706 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2707 __enable_cpu_timer_accounting(vcpu);
2708 preempt_enable();
2709}
2710
2711static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2712{
2713 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2714 __disable_cpu_timer_accounting(vcpu);
2715 preempt_enable();
2716}
2717
David Hildenbrand4287f242016-02-15 09:40:12 +01002718/* set the cpu timer - may only be called from the VCPU thread itself */
2719void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2720{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002721 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002722 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002723 if (vcpu->arch.cputm_enabled)
2724 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002725 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002726 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002727 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002728}
2729
David Hildenbranddb0758b2016-02-15 09:42:25 +01002730/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002731__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2732{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002733 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002734 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002735
2736 if (unlikely(!vcpu->arch.cputm_enabled))
2737 return vcpu->arch.sie_block->cputm;
2738
David Hildenbrand9c23a132016-02-17 21:53:33 +01002739 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2740 do {
2741 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2742 /*
2743 * If the writer would ever execute a read in the critical
2744 * section, e.g. in irq context, we have a deadlock.
2745 */
2746 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2747 value = vcpu->arch.sie_block->cputm;
2748 /* if cputm_start is 0, accounting is being started/stopped */
2749 if (likely(vcpu->arch.cputm_start))
2750 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2751 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2752 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002753 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002754}
2755
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002756void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2757{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002758
David Hildenbrand37d9df92015-03-11 16:47:33 +01002759 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002760 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002761 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002762 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002763 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002764}
2765
2766void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2767{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002768 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002769 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002770 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002771 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002772 vcpu->arch.enabled_gmap = gmap_get_enabled();
2773 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002774
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002775}
2776
2777static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2778{
2779 /* this equals the initial cpu reset in the POP, but we don't switch to ESA */
2780 vcpu->arch.sie_block->gpsw.mask = 0UL;
2781 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002782 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002783 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002784 vcpu->arch.sie_block->ckc = 0UL;
2785 vcpu->arch.sie_block->todpr = 0;
2786 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002787 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2788 CR0_INTERRUPT_KEY_SUBMASK |
2789 CR0_MEASUREMENT_ALERT_SUBMASK;
2790 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2791 CR14_UNUSED_33 |
2792 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002793 /* make sure the new fpc will be lazily loaded */
2794 save_fpu_regs();
2795 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002796 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002797 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002798 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002799 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2800 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002801 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2802 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002803 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002804}
2805
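/*
 * Finish VCPU creation after the vcpu fd is visible: inherit the VM's
 * TOD epoch under kvm->lock with preemption disabled (consistent with
 * kvm_s390_set_tod_clock), add the VCPU to the SCA and pick the gmap
 * that the first vcpu_load should enable.
 */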
Dominik Dingel31928aa2014-12-04 15:47:07 +01002806void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002807{
Jason J. Herne72f25022014-11-25 09:46:02 -05002808 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002809 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002810 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002811 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002812 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002813 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002814 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002815 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002816 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002817 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002818 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2819 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002820 /* make vcpu_load load the right gmap on the first trigger */
2821 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002822}
2823
Tony Krowiak5102ee82014-06-27 14:46:01 -04002824static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2825{
Tony Krowiake585b242018-09-25 19:16:18 -04002826 /*
2827 * If the AP instructions are not being interpreted and the MSAX3
2828 * facility is not configured for the guest, there is nothing to set up.
2829 */
2830 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002831 return;
2832
Tony Krowiake585b242018-09-25 19:16:18 -04002833 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002834 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002835 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Tony Krowiaka374e892014-09-03 10:13:53 +02002836
Tony Krowiake585b242018-09-25 19:16:18 -04002837 if (vcpu->kvm->arch.crypto.apie)
2838 vcpu->arch.sie_block->eca |= ECA_APIE;
2839
2840 /* Set up protected key support */
Tony Krowiaka374e892014-09-03 10:13:53 +02002841 if (vcpu->kvm->arch.crypto.aes_kw)
2842 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2843 if (vcpu->kvm->arch.crypto.dea_kw)
2844 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002845}
2846
Dominik Dingelb31605c2014-03-25 13:47:11 +01002847void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2848{
2849 free_page(vcpu->arch.sie_block->cbrlo);
2850 vcpu->arch.sie_block->cbrlo = 0;
2851}
2852
2853int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2854{
2855 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2856 if (!vcpu->arch.sie_block->cbrlo)
2857 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002858 return 0;
2859}
2860
Michael Mueller91520f12015-02-27 14:32:11 +01002861static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2862{
2863 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2864
Michael Mueller91520f12015-02-27 14:32:11 +01002865 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002866 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002867 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002868}
2869
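/*
 * One-time setup of the SIE control block: base CPU state, the cpu
 * model (ibc and facility list) and the interpretation facilities that
 * are enabled only when both the host (sclp) and the configured guest
 * cpu model (test_kvm_facility) support them.
 */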
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002870int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2871{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002872 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002873
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002874 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2875 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002876 CPUSTAT_STOPPED);
2877
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002878 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002879 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002880 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002881 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002882
Michael Mueller91520f12015-02-27 14:32:11 +01002883 kvm_s390_vcpu_setup_model(vcpu);
2884
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002885 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2886 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002887 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002888 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002889 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002890 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002891 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002892
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002893 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002894 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002895 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002896 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2897 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002898 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002899 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002900 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002901 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002902 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002903 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002904 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002905 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002906 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002907 vcpu->arch.sie_block->eca |= ECA_VX;
2908 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002909 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002910 if (test_kvm_facility(vcpu->kvm, 139))
2911 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002912 if (test_kvm_facility(vcpu->kvm, 156))
2913 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002914 if (vcpu->arch.sie_block->gd) {
2915 vcpu->arch.sie_block->eca |= ECA_AIV;
2916 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2917 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2918 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002919 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2920 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002921 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002922
2923 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002924 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05002925 else
2926 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002927
Dominik Dingele6db1d62015-05-07 15:41:57 +02002928 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002929 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2930 if (rc)
2931 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002932 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002933 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002934 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002935
Collin Walling67d49d52018-08-31 12:51:19 -04002936 vcpu->arch.sie_block->hpid = HPID_KVM;
2937
Tony Krowiak5102ee82014-06-27 14:46:01 -04002938 kvm_s390_vcpu_crypto_setup(vcpu);
2939
Dominik Dingelb31605c2014-03-25 13:47:11 +01002940 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002941}
2942
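/*
 * Allocate a new VCPU: the SIE block lives in a dedicated 4k sie_page
 * together with the itdb, the GISA origin (if any) is linked via gd,
 * and the requested id must fit into the SCA unless this is a
 * ucontrol VM.
 */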
2943struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2944 unsigned int id)
2945{
Carsten Otte4d475552011-10-18 12:27:12 +02002946 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002947 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002948 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002949
David Hildenbrand42158252015-10-12 12:57:22 +02002950 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002951 goto out;
2952
2953 rc = -ENOMEM;
2954
Michael Muellerb110fea2013-06-12 13:54:54 +02002955 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002956 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002957 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002958
QingFeng Haoda72ca42017-06-07 11:41:19 +02002959 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002960 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2961 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002962 goto out_free_cpu;
2963
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002964 vcpu->arch.sie_block = &sie_page->sie_block;
2965 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2966
David Hildenbrandefed1102015-04-16 12:32:41 +02002967 /* the real guest size will always be smaller than msl */
2968 vcpu->arch.sie_block->mso = 0;
2969 vcpu->arch.sie_block->msl = sclp.hamax;
2970
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002971 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002972 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Mueller982cff42019-01-31 09:52:38 +01002973 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02002974 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2975 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002976 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002977
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002978 rc = kvm_vcpu_init(vcpu, kvm, id);
2979 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002980 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002981 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002982 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002983 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002984
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002985 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002986out_free_sie_block:
2987 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002988out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002989 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002990out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002991 return ERR_PTR(rc);
2992}
2993
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002994int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2995{
David Hildenbrand9a022062014-08-05 17:40:47 +02002996 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002997}
2998
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002999bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3000{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003001 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003002}
3003
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003004void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003005{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003006 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003007 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003008}
3009
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003010void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003011{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003012 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003013}
3014
Christian Borntraeger8e236542015-04-09 13:49:04 +02003015static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3016{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003017 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003018 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003019}
3020
David Hildenbrand9ea59722018-09-25 19:16:16 -04003021bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3022{
3023 return atomic_read(&vcpu->arch.sie_block->prog20) &
3024 (PROG_BLOCK_SIE | PROG_REQUEST);
3025}
3026
Christian Borntraeger8e236542015-04-09 13:49:04 +02003027static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3028{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003029 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003030}
3031
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003032/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003033 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003034 * If the CPU is not running (e.g. waiting while idle) the function will
3035 * return immediately. */
3036void exit_sie(struct kvm_vcpu *vcpu)
3037{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003038 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003039 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003040 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3041 cpu_relax();
3042}
3043
Christian Borntraeger8e236542015-04-09 13:49:04 +02003044/* Kick a guest cpu out of SIE to process a request synchronously */
3045void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003046{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003047 kvm_make_request(req, vcpu);
3048 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003049}
3050
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003051static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3052 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003053{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003054 struct kvm *kvm = gmap->private;
3055 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003056 unsigned long prefix;
3057 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003058
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003059 if (gmap_is_shadow(gmap))
3060 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003061 if (start >= 1UL << 31)
3062 /* We are only interested in prefix pages */
3063 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003064 kvm_for_each_vcpu(i, vcpu, kvm) {
3065 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003066 prefix = kvm_s390_get_prefix(vcpu);
3067 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3068 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3069 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003070 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003071 }
3072 }
3073}
3074
Christoffer Dallb6d33832012-03-08 16:44:24 -05003075int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3076{
3077 /* kvm common code refers to this, but never calls it */
3078 BUG();
3079 return 0;
3080}
3081
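/*
 * Readers/writers for the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls.
 * A minimal, hypothetical userspace sketch for reading the guest CPU
 * timer (assuming an open vcpu fd) would be:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */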
Carsten Otte14eebd92012-05-15 14:15:26 +02003082static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3083 struct kvm_one_reg *reg)
3084{
3085 int r = -EINVAL;
3086
3087 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003088 case KVM_REG_S390_TODPR:
3089 r = put_user(vcpu->arch.sie_block->todpr,
3090 (u32 __user *)reg->addr);
3091 break;
3092 case KVM_REG_S390_EPOCHDIFF:
3093 r = put_user(vcpu->arch.sie_block->epoch,
3094 (u64 __user *)reg->addr);
3095 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003096 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003097 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003098 (u64 __user *)reg->addr);
3099 break;
3100 case KVM_REG_S390_CLOCK_COMP:
3101 r = put_user(vcpu->arch.sie_block->ckc,
3102 (u64 __user *)reg->addr);
3103 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003104 case KVM_REG_S390_PFTOKEN:
3105 r = put_user(vcpu->arch.pfault_token,
3106 (u64 __user *)reg->addr);
3107 break;
3108 case KVM_REG_S390_PFCOMPARE:
3109 r = put_user(vcpu->arch.pfault_compare,
3110 (u64 __user *)reg->addr);
3111 break;
3112 case KVM_REG_S390_PFSELECT:
3113 r = put_user(vcpu->arch.pfault_select,
3114 (u64 __user *)reg->addr);
3115 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003116 case KVM_REG_S390_PP:
3117 r = put_user(vcpu->arch.sie_block->pp,
3118 (u64 __user *)reg->addr);
3119 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003120 case KVM_REG_S390_GBEA:
3121 r = put_user(vcpu->arch.sie_block->gbea,
3122 (u64 __user *)reg->addr);
3123 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003124 default:
3125 break;
3126 }
3127
3128 return r;
3129}
3130
3131static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3132 struct kvm_one_reg *reg)
3133{
3134 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003135 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003136
3137 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003138 case KVM_REG_S390_TODPR:
3139 r = get_user(vcpu->arch.sie_block->todpr,
3140 (u32 __user *)reg->addr);
3141 break;
3142 case KVM_REG_S390_EPOCHDIFF:
3143 r = get_user(vcpu->arch.sie_block->epoch,
3144 (u64 __user *)reg->addr);
3145 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003146 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003147 r = get_user(val, (u64 __user *)reg->addr);
3148 if (!r)
3149 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003150 break;
3151 case KVM_REG_S390_CLOCK_COMP:
3152 r = get_user(vcpu->arch.sie_block->ckc,
3153 (u64 __user *)reg->addr);
3154 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003155 case KVM_REG_S390_PFTOKEN:
3156 r = get_user(vcpu->arch.pfault_token,
3157 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003158 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3159 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003160 break;
3161 case KVM_REG_S390_PFCOMPARE:
3162 r = get_user(vcpu->arch.pfault_compare,
3163 (u64 __user *)reg->addr);
3164 break;
3165 case KVM_REG_S390_PFSELECT:
3166 r = get_user(vcpu->arch.pfault_select,
3167 (u64 __user *)reg->addr);
3168 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003169 case KVM_REG_S390_PP:
3170 r = get_user(vcpu->arch.sie_block->pp,
3171 (u64 __user *)reg->addr);
3172 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003173 case KVM_REG_S390_GBEA:
3174 r = get_user(vcpu->arch.sie_block->gbea,
3175 (u64 __user *)reg->addr);
3176 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003177 default:
3178 break;
3179 }
3180
3181 return r;
3182}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003183
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003184static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3185{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003186 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003187 return 0;
3188}
3189
3190int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3191{
Christoffer Dall875656f2017-12-04 21:35:27 +01003192 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003193 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003194 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003195 return 0;
3196}
3197
3198int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3199{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003200 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003201 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003202 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003203 return 0;
3204}
3205
3206int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3207 struct kvm_sregs *sregs)
3208{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003209 vcpu_load(vcpu);
3210
Christian Borntraeger59674c12012-01-11 11:20:33 +01003211 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003212 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003213
3214 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003215 return 0;
3216}
3217
3218int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3219 struct kvm_sregs *sregs)
3220{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003221 vcpu_load(vcpu);
3222
Christian Borntraeger59674c12012-01-11 11:20:33 +01003223 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003224 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003225
3226 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003227 return 0;
3228}
3229
3230int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3231{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003232 int ret = 0;
3233
3234 vcpu_load(vcpu);
3235
3236 if (test_fp_ctl(fpu->fpc)) {
3237 ret = -EINVAL;
3238 goto out;
3239 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003240 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003241 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003242 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3243 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003244 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003245 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003246
3247out:
3248 vcpu_put(vcpu);
3249 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003250}
3251
3252int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3253{
Christoffer Dall13931232017-12-04 21:35:34 +01003254 vcpu_load(vcpu);
3255
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003256 /* make sure we have the latest values */
3257 save_fpu_regs();
3258 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003259 convert_vx_to_fp((freg_t *) fpu->fprs,
3260 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003261 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003262 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003263 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003264
3265 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003266 return 0;
3267}
3268
3269static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3270{
3271 int rc = 0;
3272
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003273 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003274 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003275 else {
3276 vcpu->run->psw_mask = psw.mask;
3277 vcpu->run->psw_addr = psw.addr;
3278 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003279 return rc;
3280}
3281
3282int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3283 struct kvm_translation *tr)
3284{
3285 return -EINVAL; /* not implemented yet */
3286}
3287
David Hildenbrand27291e22014-01-23 12:26:52 +01003288#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3289 KVM_GUESTDBG_USE_HW_BP | \
3290 KVM_GUESTDBG_ENABLE)
3291
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003292int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3293 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003294{
David Hildenbrand27291e22014-01-23 12:26:52 +01003295 int rc = 0;
3296
Christoffer Dall66b56562017-12-04 21:35:33 +01003297 vcpu_load(vcpu);
3298
David Hildenbrand27291e22014-01-23 12:26:52 +01003299 vcpu->guest_debug = 0;
3300 kvm_s390_clear_bp_data(vcpu);
3301
Christoffer Dall66b56562017-12-04 21:35:33 +01003302 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3303 rc = -EINVAL;
3304 goto out;
3305 }
3306 if (!sclp.has_gpere) {
3307 rc = -EINVAL;
3308 goto out;
3309 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003310
3311 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3312 vcpu->guest_debug = dbg->control;
3313 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003314 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003315
3316 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3317 rc = kvm_s390_import_bp_data(vcpu, dbg);
3318 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003319 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003320 vcpu->arch.guestdbg.last_bp = 0;
3321 }
3322
3323 if (rc) {
3324 vcpu->guest_debug = 0;
3325 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003326 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003327 }
3328
Christoffer Dall66b56562017-12-04 21:35:33 +01003329out:
3330 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003331 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003332}
3333
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003334int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3335 struct kvm_mp_state *mp_state)
3336{
Christoffer Dallfd232562017-12-04 21:35:30 +01003337 int ret;
3338
3339 vcpu_load(vcpu);
3340
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003341 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003342 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3343 KVM_MP_STATE_OPERATING;
3344
3345 vcpu_put(vcpu);
3346 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003347}
3348
3349int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3350 struct kvm_mp_state *mp_state)
3351{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003352 int rc = 0;
3353
Christoffer Dalle83dff52017-12-04 21:35:31 +01003354 vcpu_load(vcpu);
3355
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003356 /* user space knows about this interface - let it control the state */
3357 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3358
3359 switch (mp_state->mp_state) {
3360 case KVM_MP_STATE_STOPPED:
3361 kvm_s390_vcpu_stop(vcpu);
3362 break;
3363 case KVM_MP_STATE_OPERATING:
3364 kvm_s390_vcpu_start(vcpu);
3365 break;
3366 case KVM_MP_STATE_LOAD:
3367 case KVM_MP_STATE_CHECK_STOP:
3368 /* fall through - CHECK_STOP and LOAD are not supported yet */
3369 default:
3370 rc = -ENXIO;
3371 }
3372
Christoffer Dalle83dff52017-12-04 21:35:31 +01003373 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003374 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003375}
3376
David Hildenbrand8ad35752014-03-14 11:00:21 +01003377static bool ibs_enabled(struct kvm_vcpu *vcpu)
3378{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003379 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003380}
3381
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003382static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3383{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003384retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003385 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003386 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003387 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003388 /*
3389 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003390 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003391 * This ensures that the ipte instruction for this request has
3392 * already finished. We might race against a second unmapper that
3393 * wants to set the blocking bit. Lets just retry the request loop.
3394 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003395 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003396 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003397 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3398 kvm_s390_get_prefix(vcpu),
3399 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003400 if (rc) {
3401 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003402 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003403 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003404 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003405 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003406
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003407 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3408 vcpu->arch.sie_block->ihcpu = 0xffff;
3409 goto retry;
3410 }
3411
David Hildenbrand8ad35752014-03-14 11:00:21 +01003412 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3413 if (!ibs_enabled(vcpu)) {
3414 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003415 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003416 }
3417 goto retry;
3418 }
3419
3420 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3421 if (ibs_enabled(vcpu)) {
3422 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003423 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003424 }
3425 goto retry;
3426 }
3427
David Hildenbrand6502a342016-06-21 14:19:51 +02003428 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3429 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3430 goto retry;
3431 }
3432
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003433 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3434 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003435 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003436 * instruction manually, in order to provide the additional
3437 * functionality needed for live migration.
3438 */
3439 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3440 goto retry;
3441 }
3442
3443 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3444 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003445 * Re-enable CMM virtualization if CMMA is available and
3446 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003447 */
3448 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003449 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003450 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3451 goto retry;
3452 }
3453
David Hildenbrand0759d062014-05-13 16:54:32 +02003454 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003455 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003456 /* we left the vsie handler, nothing to do, just clear the request */
3457 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003458
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003459 return 0;
3460}
3461
David Hildenbrand0e7def52018-02-07 12:46:43 +01003462void kvm_s390_set_tod_clock(struct kvm *kvm,
3463 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003464{
3465 struct kvm_vcpu *vcpu;
3466 struct kvm_s390_tod_clock_ext htod;
3467 int i;
3468
3469 mutex_lock(&kvm->lock);
3470 preempt_disable();
3471
3472 get_tod_clock_ext((char *)&htod);
3473
3474 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003475 kvm->arch.epdx = 0;
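	/*
	 * With the multiple-epoch facility (139), the guest TOD epoch is
	 * the 72-bit pair (epdx, epoch). If the 64-bit epoch computation
	 * wrapped (kvm->arch.epoch > gtod->tod), borrow one from the
	 * epoch index.
	 */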
3476 if (test_kvm_facility(kvm, 139)) {
3477 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3478 if (kvm->arch.epoch > gtod->tod)
3479 kvm->arch.epdx -= 1;
3480 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003481
3482 kvm_s390_vcpu_block_all(kvm);
3483 kvm_for_each_vcpu(i, vcpu, kvm) {
3484 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3485 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3486 }
3487
3488 kvm_s390_vcpu_unblock_all(kvm);
3489 preempt_enable();
3490 mutex_unlock(&kvm->lock);
3491}
3492
Thomas Huthfa576c52014-05-06 17:20:16 +02003493/**
3494 * kvm_arch_fault_in_page - fault-in guest page if necessary
3495 * @vcpu: The corresponding virtual cpu
3496 * @gpa: Guest physical address
3497 * @writable: Whether the page should be writable or not
3498 *
3499 * Make sure that a guest page has been faulted-in on the host.
3500 *
3501 * Return: Zero on success, negative error code otherwise.
3502 */
3503long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003504{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003505 return gmap_fault(vcpu->arch.gmap, gpa,
3506 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003507}
3508
Dominik Dingel3c038e62013-10-07 17:11:48 +02003509static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3510 unsigned long token)
3511{
3512 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003513 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003514
3515 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003516 irq.u.ext.ext_params2 = token;
3517 irq.type = KVM_S390_INT_PFAULT_INIT;
3518 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003519 } else {
3520 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003521 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003522 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3523 }
3524}
3525
3526void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3527 struct kvm_async_pf *work)
3528{
3529 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3530 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3531}
3532
3533void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3534 struct kvm_async_pf *work)
3535{
3536 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3537 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3538}
3539
3540void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3541 struct kvm_async_pf *work)
3542{
3543 /* s390 will always inject the page directly */
3544}
3545
3546bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3547{
3548 /*
3549 * s390 will always inject the page directly,
3550 * but we still want check_async_completion to clean up
3551 */
3552 return true;
3553}
3554
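/*
 * Arm an async pfault for the faulting guest address, but only if the
 * guest has pfault enabled (valid token, matching PSW mask bits) and
 * can actually take the init interrupt right now; the token itself is
 * read from guest real storage.
 */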
3555static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3556{
3557 hva_t hva;
3558 struct kvm_arch_async_pf arch;
3559 int rc;
3560
3561 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3562 return 0;
3563 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3564 vcpu->arch.pfault_compare)
3565 return 0;
3566 if (psw_extint_disabled(vcpu))
3567 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003568 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003569 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003570 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003571 return 0;
3572 if (!vcpu->arch.gmap->pfault_enabled)
3573 return 0;
3574
Heiko Carstens81480cc2014-01-01 16:36:07 +01003575 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3576 hva += current->thread.gmap_addr & ~PAGE_MASK;
3577 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003578 return 0;
3579
3580 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3581 return rc;
3582}
3583
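/*
 * Last preparations before entering SIE: complete pfault housekeeping,
 * deliver pending interrupts, handle kvm requests and arm guest PER
 * debugging if requested.
 */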
Thomas Huth3fb4c402013-09-12 10:33:43 +02003584static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003585{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003586 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003587
Dominik Dingel3c038e62013-10-07 17:11:48 +02003588 /*
3589 * On s390, notifications for arriving pages will be delivered directly
3590 * to the guest, but the housekeeping for completed pfaults is
3591 * handled outside the worker.
3592 */
3593 kvm_check_async_pf_completion(vcpu);
3594
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003595 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3596 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003597
3598 if (need_resched())
3599 schedule();
3600
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003601 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003602 s390_handle_mcck();
3603
Jens Freimann79395032014-04-17 10:10:30 +02003604 if (!kvm_is_ucontrol(vcpu->kvm)) {
3605 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3606 if (rc)
3607 return rc;
3608 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003609
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003610 rc = kvm_s390_handle_requests(vcpu);
3611 if (rc)
3612 return rc;
3613
David Hildenbrand27291e22014-01-23 12:26:52 +01003614 if (guestdbg_enabled(vcpu)) {
3615 kvm_s390_backup_guest_per_regs(vcpu);
3616 kvm_s390_patch_guest_per_regs(vcpu);
3617 }
3618
Michael Mueller9f30f622019-01-31 09:52:44 +01003619 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3620
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003621 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003622 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3623 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3624 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003625
Thomas Huth3fb4c402013-09-12 10:33:43 +02003626 return 0;
3627}
3628
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

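/*
 * Triage the SIE exit: reinject a machine check that interrupted SIE,
 * run the intercept handlers, and turn host-side faults either into an
 * async pfault or into a synchronous fault-in.
 */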
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

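/* The inner run loop: enter SIE repeatedly until userspace is needed. */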
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * must be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

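/*
 * Load guest state that userspace marked dirty in kvm_run into the SIE
 * control block and the host thread before entering the guest.
 */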
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}

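/* Mirror of sync_regs(): copy guest state back into kvm_run on exit. */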
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

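/* The KVM_RUN ioctl: sync state in, run the SIE loop, sync state out. */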
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

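/*
 * IBS speeds up a VM with exactly one running VCPU. These helpers queue
 * enable/disable requests that are processed on the next SIE entry.
 */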
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

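/*
 * Handle KVM_S390_MEM_OP: read or write guest logical memory through a
 * bounce buffer, or merely check accessibility when F_CHECK_ONLY is set.
 */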
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

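/*
 * Interrupt injection ioctls are served here, before the generic KVM
 * code takes vcpu->mutex, so they do not block against a running VCPU.
 */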
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

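/* Allow userspace of a ucontrol VM to mmap the SIE control block. */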
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The memory in userland may be
	 * fragmented into various different vmas. It is okay to mmap()
	 * and munmap() stuff in this slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

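/*
 * Mask of facility bits in doubleword @i that a guest may see: the top
 * 16 bits are always cleared, and a 2-bit field taken from sclp.hmfai
 * widens the cleared region by up to three further 16-bit blocks.
 */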
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

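/* Module init: require SIE, reject nested+hpage, seed the facility mask. */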
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");