// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

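/*
 * Exit and event counters exposed via the common KVM debugfs interface;
 * VCPU_STAT entries are accounted per vcpu, VM_STAT entries per vm.
 */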
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

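/*
 * Matches the layout filled in by get_tod_clock_ext(): the epoch index
 * byte followed by the upper 64 TOD-clock bits; the remaining low-order
 * clock bits are not used here.
 */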
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

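/*
 * Compile-time sanity checks: SIZE_INTERNAL must fit both the architected
 * facility mask/list sizes and the stfle area in the lowcore that the base
 * kernel fills.
 */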
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

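/*
 * Adjust the epoch (and, with the multiple-epoch facility, the epoch index)
 * of one SIE control block after the host TOD clock changed by delta, so
 * that the guest-observed time is left unchanged.
 */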
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

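/*
 * Test whether a PERFORM LOCKED OPERATION function code is installed:
 * 0x100 sets the "test bit" in r0, and condition code 0 indicates that
 * the function code nr is available.
 */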
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

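/*
 * Execute the query function (function code 0 in r0) of an instruction
 * that stores a 32-byte parameter block; used below for SORTL and DFLTCC
 * to retrieve the installed subfunctions.
 */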
static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

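/*
 * Probe the subfunctions (PLO, PTFF, CPACF, SORTL, DFLTCC) and the SIE
 * features the host provides and record what can be offered to guests.
 */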
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

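/*
 * Walk the memslot one segment (_PAGE_ENTRIES pages) at a time and
 * transfer the dirty bits collected in the gmap into the generic KVM
 * dirty bitmap.
 */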
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

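/* make all vcpus intercept operation exceptions (see KVM_CAP_S390_USER_INSTR0) */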
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

Tony Krowiak20c922f2018-04-22 11:37:03 -0400901void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
Tony Krowiaka374e892014-09-03 10:13:53 +0200902{
903 struct kvm_vcpu *vcpu;
904 int i;
905
Tony Krowiak20c922f2018-04-22 11:37:03 -0400906 kvm_s390_vcpu_block_all(kvm);
907
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400908 kvm_for_each_vcpu(i, vcpu, kvm) {
Tony Krowiak20c922f2018-04-22 11:37:03 -0400909 kvm_s390_vcpu_crypto_setup(vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400910 /* recreate the shadow crycb by leaving the VSIE handler */
911 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
912 }
Tony Krowiak20c922f2018-04-22 11:37:03 -0400913
914 kvm_s390_vcpu_unblock_all(kvm);
915}
916
917static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
918{
Tony Krowiaka374e892014-09-03 10:13:53 +0200919 mutex_lock(&kvm->lock);
920 switch (attr->attr) {
921 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200922 if (!test_kvm_facility(kvm, 76)) {
923 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400924 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200925 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200926 get_random_bytes(
927 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
928 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
929 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200930 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200931 break;
932 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200933 if (!test_kvm_facility(kvm, 76)) {
934 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400935 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200936 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200937 get_random_bytes(
938 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
939 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
940 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200941 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200942 break;
943 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200944 if (!test_kvm_facility(kvm, 76)) {
945 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400946 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200947 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200948 kvm->arch.crypto.aes_kw = 0;
949 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
950 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200951 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200952 break;
953 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200954 if (!test_kvm_facility(kvm, 76)) {
955 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400956 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200957 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200958 kvm->arch.crypto.dea_kw = 0;
959 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
960 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200961 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200962 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -0400963 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
964 if (!ap_instructions_available()) {
965 mutex_unlock(&kvm->lock);
966 return -EOPNOTSUPP;
967 }
968 kvm->arch.crypto.apie = 1;
969 break;
970 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
971 if (!ap_instructions_available()) {
972 mutex_unlock(&kvm->lock);
973 return -EOPNOTSUPP;
974 }
975 kvm->arch.crypto.apie = 0;
976 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200977 default:
978 mutex_unlock(&kvm->lock);
979 return -ENXIO;
980 }
981
Tony Krowiak20c922f2018-04-22 11:37:03 -0400982 kvm_s390_vcpu_crypto_reset_all(kvm);
Tony Krowiaka374e892014-09-03 10:13:53 +0200983 mutex_unlock(&kvm->lock);
984 return 0;
985}
986
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200987static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
988{
989 int cx;
990 struct kvm_vcpu *vcpu;
991
992 kvm_for_each_vcpu(cx, vcpu, kvm)
993 kvm_s390_sync_request(req, vcpu);
994}
995
996/*
997 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100998 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200999 */
1000static int kvm_s390_vm_start_migration(struct kvm *kvm)
1001{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001002 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001003 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001004 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001005 int slotnr;
1006
1007 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001008 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001009 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001010 slots = kvm_memslots(kvm);
1011 if (!slots || !slots->used_slots)
1012 return -EINVAL;
1013
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001014 if (!kvm->arch.use_cmma) {
1015 kvm->arch.migration_mode = 1;
1016 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001017 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001018 /* mark all the pages in active slots as dirty */
1019 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1020 ms = slots->memslots + slotnr;
Igor Mammedov13a17cc2019-09-11 03:52:18 -04001021 if (!ms->dirty_bitmap)
1022 return -EINVAL;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001023 /*
1024 * The second half of the bitmap is only used on x86,
1025 * and would be wasted otherwise, so we put it to good
1026 * use here to keep track of the state of the storage
1027 * attributes.
1028 */
1029 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1030 ram_pages += ms->npages;
1031 }
1032 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1033 kvm->arch.migration_mode = 1;
1034 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001035 return 0;
1036}
1037
1038/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001039 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001040 * kvm_s390_vm_start_migration.
1041 */
1042static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1043{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001044 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001045 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001046 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001047 kvm->arch.migration_mode = 0;
1048 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001049 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001050 return 0;
1051}
1052
1053static int kvm_s390_vm_set_migration(struct kvm *kvm,
1054 struct kvm_device_attr *attr)
1055{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001056 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001057
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001058 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001059 switch (attr->attr) {
1060 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001061 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001062 break;
1063 case KVM_S390_VM_MIGRATION_STOP:
1064 res = kvm_s390_vm_stop_migration(kvm);
1065 break;
1066 default:
1067 break;
1068 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001069 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001070
1071 return res;
1072}
1073
1074static int kvm_s390_vm_get_migration(struct kvm *kvm,
1075 struct kvm_device_attr *attr)
1076{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001077 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001078
1079 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1080 return -ENXIO;
1081
1082 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1083 return -EFAULT;
1084 return 0;
1085}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
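
/*
 * Usage sketch (hypothetical userspace code, not part of this file):
 * on hosts with the multiple-epoch facility (139) the full clock,
 * including the epoch index, can be set in a single call through the
 * KVM_S390_VM_TOD_EXT attribute (vm_fd and new_tod_value are assumed):
 *
 *	struct kvm_s390_vm_tod_clock gtod = {
 *		.epoch_idx = 0,
 *		.tod = new_tod_value,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr = KVM_S390_VM_TOD_EXT,
 *		.addr = (__u64)(unsigned long)&gtod,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Without facility 139, a non-zero epoch_idx is rejected with -EINVAL
 * by kvm_s390_set_tod_ext() above.
 */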

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
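
/*
 * Usage sketch (hypothetical userspace code, not part of this file):
 * reading the storage keys of the first 256 guest pages (vm_fd is
 * assumed):
 *
 *	uint8_t keys[256];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 256,
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *
 *	ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest is not
 * using storage keys, so there is nothing to save.
 */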

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
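
/*
 * Worked example for the constant above: on a 64-bit host,
 * KVM_S390_MAX_BIT_DISTANCE is 2 * 8 = 16 pages. A run of up to 16
 * clean pages between two dirty ones is therefore sent inline, since
 * each page costs one byte in the buffer and starting a new block
 * would cost the same 16 bytes for the new base address and length.
 */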

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}
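
/*
 * Example for the lookup above: with two memslots covering gfns
 * [0x0, 0x100) and [0x200, 0x300), a lookup of gfn 0x180 falls into the
 * hole; the binary search then terminates on one of the two bordering
 * slots and returns its index instead of failing, which is what the
 * CMMA walk in kvm_s390_next_dirty_cmma() below needs to keep going.
 */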

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
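
/*
 * Usage sketch (hypothetical userspace code, not part of this file):
 * draining the dirty CMMA values during migration; vm_fd is assumed and
 * save_cmma() is a hypothetical helper that stores the result:
 *
 *	uint8_t values[4096];
 *	struct kvm_s390_cmma_log args = {
 *		.start_gfn = 0,
 *		.count = sizeof(values),
 *		.flags = 0,
 *		.values = (__u64)(unsigned long)values,
 *	};
 *
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &args) < 0)
 *			break;
 *		save_cmma(args.start_gfn, values, args.count);
 *		args.start_gfn += args.count;
 *		args.count = sizeof(values);
 *	} while (args.remaining);
 */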

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
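
/*
 * Summary of the format selection above:
 *
 *	MSAX3 (facility 76)	APXA	resulting CRYCBD format
 *	not installed		-	0
 *	installed		no	1
 *	installed		yes	2
 */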

void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* fall through; both formats use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* Cannot happen */
2334 break;
2335 }
2336
2337 /* recreate the shadow crycb for each vcpu */
2338 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2339 kvm_s390_vcpu_unblock_all(kvm);
2340 mutex_unlock(&kvm->lock);
2341}
2342EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2343
Tony Krowiak421045982018-09-25 19:16:25 -04002344void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2345{
2346 mutex_lock(&kvm->lock);
2347 kvm_s390_vcpu_block_all(kvm);
2348
2349 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2350 sizeof(kvm->arch.crypto.crycb->apcb0));
2351 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2352 sizeof(kvm->arch.crypto.crycb->apcb1));
2353
Pierre Morel0e237e42018-10-05 10:31:09 +02002354 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002355 /* recreate the shadow crycb for each vcpu */
2356 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002357 kvm_s390_vcpu_unblock_all(kvm);
2358 mutex_unlock(&kvm->lock);
2359}
2360EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2361
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002362static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002363{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002364 struct cpuid cpuid;
2365
2366 get_cpu_id(&cpuid);
2367 cpuid.version = 0xff;
2368 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002369}
2370
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002371static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002372{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002373 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002374 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002375
Tony Krowiake585b242018-09-25 19:16:18 -04002376 if (!test_kvm_facility(kvm, 76))
2377 return;
2378
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002379 /* Enable AES/DEA protected key functions by default */
2380 kvm->arch.crypto.aes_kw = 1;
2381 kvm->arch.crypto.dea_kw = 1;
2382 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2383 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2384 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2385 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002386}
2387
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002388static void sca_dispose(struct kvm *kvm)
2389{
2390 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002391 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002392 else
2393 free_page((unsigned long)(kvm->arch.sca));
2394 kvm->arch.sca = NULL;
2395}
2396
Carsten Ottee08b9632012-01-04 10:25:20 +01002397int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002398{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002399 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002400 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002401 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002402 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002403
Carsten Ottee08b9632012-01-04 10:25:20 +01002404 rc = -EINVAL;
2405#ifdef CONFIG_KVM_S390_UCONTROL
2406 if (type & ~KVM_VM_S390_UCONTROL)
2407 goto out_err;
2408 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2409 goto out_err;
2410#else
2411 if (type)
2412 goto out_err;
2413#endif
2414
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002415 rc = s390_enable_sie();
2416 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002417 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002418
Carsten Otteb2904112011-10-18 12:27:13 +02002419 rc = -ENOMEM;
2420
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002421 if (!sclp.has_64bscao)
2422 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002423 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002424 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002425 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002426 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002427 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002428 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002429 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002430 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002431 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002432 kvm->arch.sca = (struct bsca_block *)
2433 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002434 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002435
2436 sprintf(debug_name, "kvm-%u", current->pid);
2437
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002438 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002439 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002440 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002441
Michael Mueller19114be2017-05-30 14:26:02 +02002442 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002443 kvm->arch.sie_page2 =
2444 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2445 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002446 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002447
Michael Mueller25c84db2019-01-31 09:52:41 +01002448 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002449 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002450
2451 for (i = 0; i < kvm_s390_fac_size(); i++) {
2452 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2453 (kvm_s390_fac_base[i] |
2454 kvm_s390_fac_ext[i]);
2455 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2456 kvm_s390_fac_base[i];
2457 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002458 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002459
David Hildenbrand19352222017-08-29 16:31:08 +02002460	/* we are always in czam mode (z/Architecture architectural mode) - even on pre z14 machines */
2461 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2462 set_kvm_facility(kvm->arch.model.fac_list, 138);
2463 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002464 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2465 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002466 if (MACHINE_HAS_TLB_GUEST) {
2467 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2468 set_kvm_facility(kvm->arch.model.fac_list, 147);
2469 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002470
Pierre Morel05f31e32019-05-21 17:34:37 +02002471 if (css_general_characteristics.aiv && test_facility(65))
2472 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2473
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002474 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002475 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002476
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002477 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002478
Fei Li51978392017-02-17 17:06:26 +08002479 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002480 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002481 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2482 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002483 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002484 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002485
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002486 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002487 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002488
Carsten Ottee08b9632012-01-04 10:25:20 +01002489 if (type & KVM_VM_S390_UCONTROL) {
2490 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002491 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002492 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002493 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002494 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002495 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002496 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002497 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002498 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002499 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002500 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002501 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002502 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002503 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002504
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002505 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002506 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002507 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002508 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002509 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002510 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002511
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002512 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002513out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002514 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002515 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002516 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002517 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002518 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002519}
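/*
 * A note on the sca_offset staggering above (illustrative sketch, not
 * part of the build): basic SCAs of different VMs are spread across a
 * page in 16-byte steps so they do not all start on the same cache
 * lines. Successive VM creations roughly see:
 *
 *	sca_offset = 16, 32, 48, ...
 *	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
 *		sca_offset = 0;		// wrap, reuse the page start
 */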
2520
Luiz Capitulino235539b2016-09-07 14:47:23 -04002521bool kvm_arch_has_vcpu_debugfs(void)
2522{
2523 return false;
2524}
2525
2526int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2527{
2528 return 0;
2529}
2530
Christian Borntraegerd329c032008-11-26 14:50:27 +01002531void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2532{
2533 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002534 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002535 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002536 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002537 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002538 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002539
2540 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002541 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002542
Dominik Dingele6db1d62015-05-07 15:41:57 +02002543 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002544 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002545 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002546
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002547 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002548 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002549}
2550
2551static void kvm_free_vcpus(struct kvm *kvm)
2552{
2553 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002554 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002555
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002556 kvm_for_each_vcpu(i, vcpu, kvm)
2557 kvm_arch_vcpu_destroy(vcpu);
2558
2559 mutex_lock(&kvm->lock);
2560 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2561 kvm->vcpus[i] = NULL;
2562
2563 atomic_set(&kvm->online_vcpus, 0);
2564 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002565}
2566
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002567void kvm_arch_destroy_vm(struct kvm *kvm)
2568{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002569 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002570 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002571 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002572 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002573 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002574 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002575 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002576 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002577 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002578 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002579 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002580}
2581
2582/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002583static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2584{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002585 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002586 if (!vcpu->arch.gmap)
2587 return -ENOMEM;
2588 vcpu->arch.gmap->private = vcpu->kvm;
2589
2590 return 0;
2591}
2592
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002593static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2594{
David Hildenbranda6940672016-08-08 22:39:32 +02002595 if (!kvm_s390_use_sca_entries())
2596 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002597 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002598 if (vcpu->kvm->arch.use_esca) {
2599 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002600
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002601 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002602 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002603 } else {
2604 struct bsca_block *sca = vcpu->kvm->arch.sca;
2605
2606 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002607 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002608 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002609 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002610}
2611
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002612static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002613{
David Hildenbranda6940672016-08-08 22:39:32 +02002614 if (!kvm_s390_use_sca_entries()) {
2615 struct bsca_block *sca = vcpu->kvm->arch.sca;
2616
2617 /* we still need the basic sca for the ipte control */
2618 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2619 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002620 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002621 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002622 read_lock(&vcpu->kvm->arch.sca_lock);
2623 if (vcpu->kvm->arch.use_esca) {
2624 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002625
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002626 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002627 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2628 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002629 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002630 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002631 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002632 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002633
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002634 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002635 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2636 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002637 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002638 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002639 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002640}
2641
2642/* Basic SCA to Extended SCA data copy routines */
2643static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2644{
2645 d->sda = s->sda;
2646 d->sigp_ctrl.c = s->sigp_ctrl.c;
2647 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2648}
2649
2650static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2651{
2652 int i;
2653
2654 d->ipte_control = s->ipte_control;
2655 d->mcn[0] = s->mcn;
2656 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2657 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2658}
2659
2660static int sca_switch_to_extended(struct kvm *kvm)
2661{
2662 struct bsca_block *old_sca = kvm->arch.sca;
2663 struct esca_block *new_sca;
2664 struct kvm_vcpu *vcpu;
2665 unsigned int vcpu_idx;
2666 u32 scaol, scaoh;
2667
2668 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2669 if (!new_sca)
2670 return -ENOMEM;
2671
2672 scaoh = (u32)((u64)(new_sca) >> 32);
2673 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2674
2675 kvm_s390_vcpu_block_all(kvm);
2676 write_lock(&kvm->arch.sca_lock);
2677
2678 sca_copy_b_to_e(new_sca, old_sca);
2679
2680 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2681 vcpu->arch.sie_block->scaoh = scaoh;
2682 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002683 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002684 }
2685 kvm->arch.sca = new_sca;
2686 kvm->arch.use_esca = 1;
2687
2688 write_unlock(&kvm->arch.sca_lock);
2689 kvm_s390_vcpu_unblock_all(kvm);
2690
2691 free_page((unsigned long)old_sca);
2692
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002693 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2694 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002695 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002696}
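/*
 * Sketch of the switch protocol above (illustrative only): all VCPUs
 * are blocked and kicked out of SIE, the entries are copied under the
 * sca_lock writer side, every sie_block is repointed to the new ESCA,
 * and only then do the VCPUs resume. A caller serializes against
 * concurrent switches via kvm->lock, as sca_can_add_vcpu() below does:
 *
 *	mutex_lock(&kvm->lock);
 *	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
 *	mutex_unlock(&kvm->lock);
 */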
2697
2698static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2699{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002700 int rc;
2701
David Hildenbranda6940672016-08-08 22:39:32 +02002702 if (!kvm_s390_use_sca_entries()) {
2703 if (id < KVM_MAX_VCPUS)
2704 return true;
2705 return false;
2706 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002707 if (id < KVM_S390_BSCA_CPU_SLOTS)
2708 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002709 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002710 return false;
2711
2712 mutex_lock(&kvm->lock);
2713 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2714 mutex_unlock(&kvm->lock);
2715
2716 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002717}
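/*
 * Worked example (slot counts taken as assumptions from kvm-s390.h):
 * with KVM_S390_BSCA_CPU_SLOTS == 64 and KVM_S390_ESCA_CPU_SLOTS == 248,
 * creating VCPU id 63 still fits the basic SCA, id 64 triggers the
 * switch above (if sclp reports ESCA and 64-bit SCAO support), and
 * id 248 or higher is rejected even with an ESCA in place.
 */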
2718
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002719int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2720{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002721 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2722 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002723 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2724 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002725 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002726 KVM_SYNC_CRS |
2727 KVM_SYNC_ARCH0 |
2728 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002729 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002730 if (test_kvm_facility(vcpu->kvm, 64))
2731 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002732 if (test_kvm_facility(vcpu->kvm, 82))
2733 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002734 if (test_kvm_facility(vcpu->kvm, 133))
2735 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002736 if (test_kvm_facility(vcpu->kvm, 156))
2737 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002738 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2739 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2740 */
2741 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002742 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002743 else
2744 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002745
2746 if (kvm_is_ucontrol(vcpu->kvm))
2747 return __kvm_ucontrol_vcpu_init(vcpu);
2748
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002749 return 0;
2750}
2751
David Hildenbranddb0758b2016-02-15 09:42:25 +01002752/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2753static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2754{
2755 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002756 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002757 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002758 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002759}
2760
2761/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2762static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2763{
2764 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002765 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002766 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2767 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002768 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002769}
2770
2771/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2772static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2773{
2774 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2775 vcpu->arch.cputm_enabled = true;
2776 __start_cpu_timer_accounting(vcpu);
2777}
2778
2779/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2780static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2781{
2782 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2783 __stop_cpu_timer_accounting(vcpu);
2784 vcpu->arch.cputm_enabled = false;
2785}
2786
2787static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2788{
2789 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2790 __enable_cpu_timer_accounting(vcpu);
2791 preempt_enable();
2792}
2793
2794static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2795{
2796 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2797 __disable_cpu_timer_accounting(vcpu);
2798 preempt_enable();
2799}
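/*
 * Usage sketch for the accounting helpers above (illustrative only):
 * the __ variants always pair up inside one preemption-disabled
 * section, e.g. around guest entry and exit:
 *
 *	preempt_disable();
 *	__start_cpu_timer_accounting(vcpu);
 *	... enter SIE, run the guest, leave SIE ...
 *	__stop_cpu_timer_accounting(vcpu);
 *	preempt_enable();
 */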
2800
David Hildenbrand4287f242016-02-15 09:40:12 +01002801/* set the cpu timer - may only be called from the VCPU thread itself */
2802void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2803{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002804 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002805 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002806 if (vcpu->arch.cputm_enabled)
2807 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002808 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002809 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002810 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002811}
2812
David Hildenbranddb0758b2016-02-15 09:42:25 +01002813/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002814__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2815{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002816 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002817 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002818
2819 if (unlikely(!vcpu->arch.cputm_enabled))
2820 return vcpu->arch.sie_block->cputm;
2821
David Hildenbrand9c23a132016-02-17 21:53:33 +01002822 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2823 do {
2824 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2825 /*
2826 * If the writer would ever execute a read in the critical
2827 * section, e.g. in irq context, we have a deadlock.
2828 */
2829 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2830 value = vcpu->arch.sie_block->cputm;
2831 /* if cputm_start is 0, accounting is being started/stopped */
2832 if (likely(vcpu->arch.cputm_start))
2833 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2834 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2835 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002836 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002837}
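/*
 * Worked example for the retry loop above: a writer makes
 * cputm_seqcount odd while it updates cputm/cputm_start. A reader that
 * samples seq == 5 (odd: update in flight) masks it to 4 via "seq & ~1",
 * so read_seqcount_retry() reports a retry and the loop samples again;
 * a torn cputm/cputm_start pair can therefore never be returned.
 */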
2838
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002839void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2840{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002841
David Hildenbrand37d9df92015-03-11 16:47:33 +01002842 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002843 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002844 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002845 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002846 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002847}
2848
2849void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2850{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002851 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002852 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002853 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002854 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002855 vcpu->arch.enabled_gmap = gmap_get_enabled();
2856 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002857
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002858}
2859
2860static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2861{
2862	/* this equals the initial cpu reset in the PoP (Principles of Operation), but we don't switch to ESA */
2863 vcpu->arch.sie_block->gpsw.mask = 0UL;
2864 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002865 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002866 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002867 vcpu->arch.sie_block->ckc = 0UL;
2868 vcpu->arch.sie_block->todpr = 0;
2869 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002870 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2871 CR0_INTERRUPT_KEY_SUBMASK |
2872 CR0_MEASUREMENT_ALERT_SUBMASK;
2873 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2874 CR14_UNUSED_33 |
2875 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002876 /* make sure the new fpc will be lazily loaded */
2877 save_fpu_regs();
2878 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002879 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002880 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002881 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002882 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2883 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002884 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2885 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002886 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002887}
2888
Dominik Dingel31928aa2014-12-04 15:47:07 +01002889void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002890{
Jason J. Herne72f25022014-11-25 09:46:02 -05002891 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002892 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002893 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002894 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002895 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002896 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002897 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002898 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002899 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002900 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002901 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2902 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002903 /* make vcpu_load load the right gmap on the first trigger */
2904 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002905}
2906
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002907static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2908{
2909 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2910 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2911 return true;
2912 return false;
2913}
2914
2915static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2916{
2917 /* At least one ECC subfunction must be present */
2918 return kvm_has_pckmo_subfunc(kvm, 32) ||
2919 kvm_has_pckmo_subfunc(kvm, 33) ||
2920 kvm_has_pckmo_subfunc(kvm, 34) ||
2921 kvm_has_pckmo_subfunc(kvm, 40) ||
2922 kvm_has_pckmo_subfunc(kvm, 41);
2924}
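/*
 * Background (bit assignments taken as an assumption from the MSA9
 * definition): bits 32-34 and 40-41 are the PCKMO subfunctions that
 * wrap ECC keys (P256/P384/P521 and Ed25519/Ed448). test_bit_inv()
 * uses MSB-0 numbering, matching the PCKMO query block, e.g.:
 *
 *	kvm_has_pckmo_subfunc(kvm, 32);	// encrypted-ECC-P256 available?
 */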
2925
Tony Krowiak5102ee82014-06-27 14:46:01 -04002926static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2927{
Tony Krowiake585b242018-09-25 19:16:18 -04002928 /*
2929 * If the AP instructions are not being interpreted and the MSAX3
2930 * facility is not configured for the guest, there is nothing to set up.
2931 */
2932 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002933 return;
2934
Tony Krowiake585b242018-09-25 19:16:18 -04002935 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002936 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002937 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002938 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002939
Tony Krowiake585b242018-09-25 19:16:18 -04002940 if (vcpu->kvm->arch.crypto.apie)
2941 vcpu->arch.sie_block->eca |= ECA_APIE;
2942
2943 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002944 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002945 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002946		/* ECC keys are also wrapped with the AES wrapping key */
2947 if (kvm_has_pckmo_ecc(vcpu->kvm))
2948 vcpu->arch.sie_block->ecd |= ECD_ECC;
2949 }
2950
Tony Krowiaka374e892014-09-03 10:13:53 +02002951 if (vcpu->kvm->arch.crypto.dea_kw)
2952 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002953}
2954
Dominik Dingelb31605c2014-03-25 13:47:11 +01002955void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2956{
2957 free_page(vcpu->arch.sie_block->cbrlo);
2958 vcpu->arch.sie_block->cbrlo = 0;
2959}
2960
2961int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2962{
2963 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2964 if (!vcpu->arch.sie_block->cbrlo)
2965 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002966 return 0;
2967}
2968
Michael Mueller91520f12015-02-27 14:32:11 +01002969static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2970{
2971 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2972
Michael Mueller91520f12015-02-27 14:32:11 +01002973 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002974 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002975 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002976}
2977
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002978int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2979{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002980 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002981
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002982 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2983 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002984 CPUSTAT_STOPPED);
2985
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002986 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002987 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002988 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002989 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002990
Michael Mueller91520f12015-02-27 14:32:11 +01002991 kvm_s390_vcpu_setup_model(vcpu);
2992
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002993 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2994 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002995 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002996 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002997 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002998 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002999 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003000
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003001 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003002 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003003 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003004 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3005 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003006 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003007 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003008 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003009 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003010 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003011 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003012 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003013 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003014 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003015 vcpu->arch.sie_block->eca |= ECA_VX;
3016 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003017 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003018 if (test_kvm_facility(vcpu->kvm, 139))
3019 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003020 if (test_kvm_facility(vcpu->kvm, 156))
3021 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003022 if (vcpu->arch.sie_block->gd) {
3023 vcpu->arch.sie_block->eca |= ECA_AIV;
3024 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3025 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3026 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003027 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3028 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003029 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003030
3031 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003032 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003033 else
3034 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003035
Dominik Dingele6db1d62015-05-07 15:41:57 +02003036 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003037 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3038 if (rc)
3039 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003040 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003041 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003042 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003043
Collin Walling67d49d52018-08-31 12:51:19 -04003044 vcpu->arch.sie_block->hpid = HPID_KVM;
3045
Tony Krowiak5102ee82014-06-27 14:46:01 -04003046 kvm_s390_vcpu_crypto_setup(vcpu);
3047
Dominik Dingelb31605c2014-03-25 13:47:11 +01003048 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003049}
3050
3051struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3052 unsigned int id)
3053{
Carsten Otte4d475552011-10-18 12:27:12 +02003054 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003055 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02003056 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003057
David Hildenbrand42158252015-10-12 12:57:22 +02003058 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02003059 goto out;
3060
3061 rc = -ENOMEM;
3062
Michael Muellerb110fea2013-06-12 13:54:54 +02003063 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003064 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02003065 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003066
QingFeng Haoda72ca42017-06-07 11:41:19 +02003067 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003068 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3069 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003070 goto out_free_cpu;
3071
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003072 vcpu->arch.sie_block = &sie_page->sie_block;
3073 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3074
David Hildenbrandefed1102015-04-16 12:32:41 +02003075 /* the real guest size will always be smaller than msl */
3076 vcpu->arch.sie_block->mso = 0;
3077 vcpu->arch.sie_block->msl = sclp.hamax;
3078
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003079 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003080 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Mueller982cff42019-01-31 09:52:38 +01003081 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003082 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3083 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003084 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003085
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003086 rc = kvm_vcpu_init(vcpu, kvm, id);
3087 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003088 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01003089 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003090 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02003091 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003092
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003093 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003094out_free_sie_block:
3095 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003096out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02003097 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02003098out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003099 return ERR_PTR(rc);
3100}
3101
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003102int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3103{
David Hildenbrand9a022062014-08-05 17:40:47 +02003104 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003105}
3106
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003107bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3108{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003109 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003110}
3111
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003112void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003113{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003114 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003115 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003116}
3117
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003118void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003119{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003120 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003121}
3122
Christian Borntraeger8e236542015-04-09 13:49:04 +02003123static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3124{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003125 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003126 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003127}
3128
David Hildenbrand9ea59722018-09-25 19:16:16 -04003129bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3130{
3131 return atomic_read(&vcpu->arch.sie_block->prog20) &
3132 (PROG_BLOCK_SIE | PROG_REQUEST);
3133}
3134
Christian Borntraeger8e236542015-04-09 13:49:04 +02003135static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3136{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003137 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003138}
3139
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003140/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003141 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003142 * If the CPU is not running (e.g. waiting while idle) the function will
3143 * return immediately. */
3144void exit_sie(struct kvm_vcpu *vcpu)
3145{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003146 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003147 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003148 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3149 cpu_relax();
3150}
3151
Christian Borntraeger8e236542015-04-09 13:49:04 +02003152/* Kick a guest cpu out of SIE to process a request synchronously */
3153void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003154{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003155 kvm_make_request(req, vcpu);
3156 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003157}
3158
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003159static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3160 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003161{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003162 struct kvm *kvm = gmap->private;
3163 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003164 unsigned long prefix;
3165 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003166
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003167 if (gmap_is_shadow(gmap))
3168 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003169 if (start >= 1UL << 31)
3170 /* We are only interested in prefix pages */
3171 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003172 kvm_for_each_vcpu(i, vcpu, kvm) {
3173 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003174 prefix = kvm_s390_get_prefix(vcpu);
3175 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3176 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3177 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003178 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003179 }
3180 }
3181}
3182
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003183bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3184{
3185	/* do not poll if the average steal time exceeds halt_poll_max_steal percent */
3186 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3187 halt_poll_max_steal) {
3188 vcpu->stat.halt_no_poll_steal++;
3189 return true;
3190 }
3191 return false;
3192}
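/*
 * Worked example for the check above (assuming the usual scaling of
 * 4096 TOD/CPU-timer units per microsecond, hence the "<< 12"):
 * avg_steal_timer is the average steal time per tick in those units and
 * TICK_USEC is the tick length in microseconds, so
 *
 *	steal_pct = avg_steal_timer * 100 / (TICK_USEC << 12);
 *
 * With the default halt_poll_max_steal of 10, a VCPU that loses 10% or
 * more of each tick to steal time halts immediately instead of polling.
 */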
3193
Christoffer Dallb6d33832012-03-08 16:44:24 -05003194int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3195{
3196 /* kvm common code refers to this, but never calls it */
3197 BUG();
3198 return 0;
3199}
3200
Carsten Otte14eebd92012-05-15 14:15:26 +02003201static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3202 struct kvm_one_reg *reg)
3203{
3204 int r = -EINVAL;
3205
3206 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003207 case KVM_REG_S390_TODPR:
3208 r = put_user(vcpu->arch.sie_block->todpr,
3209 (u32 __user *)reg->addr);
3210 break;
3211 case KVM_REG_S390_EPOCHDIFF:
3212 r = put_user(vcpu->arch.sie_block->epoch,
3213 (u64 __user *)reg->addr);
3214 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003215 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003216 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003217 (u64 __user *)reg->addr);
3218 break;
3219 case KVM_REG_S390_CLOCK_COMP:
3220 r = put_user(vcpu->arch.sie_block->ckc,
3221 (u64 __user *)reg->addr);
3222 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003223 case KVM_REG_S390_PFTOKEN:
3224 r = put_user(vcpu->arch.pfault_token,
3225 (u64 __user *)reg->addr);
3226 break;
3227 case KVM_REG_S390_PFCOMPARE:
3228 r = put_user(vcpu->arch.pfault_compare,
3229 (u64 __user *)reg->addr);
3230 break;
3231 case KVM_REG_S390_PFSELECT:
3232 r = put_user(vcpu->arch.pfault_select,
3233 (u64 __user *)reg->addr);
3234 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003235 case KVM_REG_S390_PP:
3236 r = put_user(vcpu->arch.sie_block->pp,
3237 (u64 __user *)reg->addr);
3238 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003239 case KVM_REG_S390_GBEA:
3240 r = put_user(vcpu->arch.sie_block->gbea,
3241 (u64 __user *)reg->addr);
3242 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003243 default:
3244 break;
3245 }
3246
3247 return r;
3248}
3249
3250static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3251 struct kvm_one_reg *reg)
3252{
3253 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003254 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003255
3256 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003257 case KVM_REG_S390_TODPR:
3258 r = get_user(vcpu->arch.sie_block->todpr,
3259 (u32 __user *)reg->addr);
3260 break;
3261 case KVM_REG_S390_EPOCHDIFF:
3262 r = get_user(vcpu->arch.sie_block->epoch,
3263 (u64 __user *)reg->addr);
3264 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003265 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003266 r = get_user(val, (u64 __user *)reg->addr);
3267 if (!r)
3268 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003269 break;
3270 case KVM_REG_S390_CLOCK_COMP:
3271 r = get_user(vcpu->arch.sie_block->ckc,
3272 (u64 __user *)reg->addr);
3273 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003274 case KVM_REG_S390_PFTOKEN:
3275 r = get_user(vcpu->arch.pfault_token,
3276 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003277 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3278 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003279 break;
3280 case KVM_REG_S390_PFCOMPARE:
3281 r = get_user(vcpu->arch.pfault_compare,
3282 (u64 __user *)reg->addr);
3283 break;
3284 case KVM_REG_S390_PFSELECT:
3285 r = get_user(vcpu->arch.pfault_select,
3286 (u64 __user *)reg->addr);
3287 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003288 case KVM_REG_S390_PP:
3289 r = get_user(vcpu->arch.sie_block->pp,
3290 (u64 __user *)reg->addr);
3291 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003292 case KVM_REG_S390_GBEA:
3293 r = get_user(vcpu->arch.sie_block->gbea,
3294 (u64 __user *)reg->addr);
3295 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003296 default:
3297 break;
3298 }
3299
3300 return r;
3301}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003302
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003303static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3304{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003305 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003306 return 0;
3307}
3308
3309int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3310{
Christoffer Dall875656f2017-12-04 21:35:27 +01003311 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003312 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003313 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003314 return 0;
3315}
3316
3317int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3318{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003319 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003320 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003321 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003322 return 0;
3323}
3324
3325int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3326 struct kvm_sregs *sregs)
3327{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003328 vcpu_load(vcpu);
3329
Christian Borntraeger59674c12012-01-11 11:20:33 +01003330 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003331 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003332
3333 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003334 return 0;
3335}
3336
3337int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3338 struct kvm_sregs *sregs)
3339{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003340 vcpu_load(vcpu);
3341
Christian Borntraeger59674c12012-01-11 11:20:33 +01003342 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003343 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003344
3345 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003346 return 0;
3347}
3348
3349int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3350{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003351 int ret = 0;
3352
3353 vcpu_load(vcpu);
3354
3355 if (test_fp_ctl(fpu->fpc)) {
3356 ret = -EINVAL;
3357 goto out;
3358 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003359 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003360 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003361 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3362 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003363 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003364 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003365
3366out:
3367 vcpu_put(vcpu);
3368 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003369}
3370
3371int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3372{
Christoffer Dall13931232017-12-04 21:35:34 +01003373 vcpu_load(vcpu);
3374
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003375 /* make sure we have the latest values */
3376 save_fpu_regs();
3377 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003378 convert_vx_to_fp((freg_t *) fpu->fprs,
3379 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003380 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003381 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003382 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003383
3384 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003385 return 0;
3386}
3387
3388static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3389{
3390 int rc = 0;
3391
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003392 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003393 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003394 else {
3395 vcpu->run->psw_mask = psw.mask;
3396 vcpu->run->psw_addr = psw.addr;
3397 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003398 return rc;
3399}
3400
3401int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3402 struct kvm_translation *tr)
3403{
3404 return -EINVAL; /* not implemented yet */
3405}
3406
David Hildenbrand27291e22014-01-23 12:26:52 +01003407#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3408 KVM_GUESTDBG_USE_HW_BP | \
3409 KVM_GUESTDBG_ENABLE)
3410
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003411int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3412 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003413{
David Hildenbrand27291e22014-01-23 12:26:52 +01003414 int rc = 0;
3415
Christoffer Dall66b56562017-12-04 21:35:33 +01003416 vcpu_load(vcpu);
3417
David Hildenbrand27291e22014-01-23 12:26:52 +01003418 vcpu->guest_debug = 0;
3419 kvm_s390_clear_bp_data(vcpu);
3420
Christoffer Dall66b56562017-12-04 21:35:33 +01003421 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3422 rc = -EINVAL;
3423 goto out;
3424 }
3425 if (!sclp.has_gpere) {
3426 rc = -EINVAL;
3427 goto out;
3428 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003429
3430 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3431 vcpu->guest_debug = dbg->control;
3432 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003433 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003434
3435 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3436 rc = kvm_s390_import_bp_data(vcpu, dbg);
3437 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003438 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003439 vcpu->arch.guestdbg.last_bp = 0;
3440 }
3441
3442 if (rc) {
3443 vcpu->guest_debug = 0;
3444 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003445 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003446 }
3447
Christoffer Dall66b56562017-12-04 21:35:33 +01003448out:
3449 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003450 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003451}
3452
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003453int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3454 struct kvm_mp_state *mp_state)
3455{
Christoffer Dallfd232562017-12-04 21:35:30 +01003456 int ret;
3457
3458 vcpu_load(vcpu);
3459
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003460 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003461 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3462 KVM_MP_STATE_OPERATING;
3463
3464 vcpu_put(vcpu);
3465 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003466}
3467
3468int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3469 struct kvm_mp_state *mp_state)
3470{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003471 int rc = 0;
3472
Christoffer Dalle83dff52017-12-04 21:35:31 +01003473 vcpu_load(vcpu);
3474
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003475 /* user space knows about this interface - let it control the state */
3476 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3477
3478 switch (mp_state->mp_state) {
3479 case KVM_MP_STATE_STOPPED:
3480 kvm_s390_vcpu_stop(vcpu);
3481 break;
3482 case KVM_MP_STATE_OPERATING:
3483 kvm_s390_vcpu_start(vcpu);
3484 break;
3485 case KVM_MP_STATE_LOAD:
3486 case KVM_MP_STATE_CHECK_STOP:
3487 /* fall through - CHECK_STOP and LOAD are not supported yet */
3488 default:
3489 rc = -ENXIO;
3490 }
3491
Christoffer Dalle83dff52017-12-04 21:35:31 +01003492 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003493 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003494}
3495
David Hildenbrand8ad35752014-03-14 11:00:21 +01003496static bool ibs_enabled(struct kvm_vcpu *vcpu)
3497{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003498 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003499}
3500
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003501static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3502{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003503retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003504 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003505 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003506 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003507 /*
3508 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003509 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003510 * This ensures that the ipte instruction for this request has
3511 * already finished. We might race against a second unmapper that
3512	 * wants to set the blocking bit. Let's just retry the request loop.
3513 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003514 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003515 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003516 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3517 kvm_s390_get_prefix(vcpu),
3518 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003519 if (rc) {
3520 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003521 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003522 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003523 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003524 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003525
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003526 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3527 vcpu->arch.sie_block->ihcpu = 0xffff;
3528 goto retry;
3529 }
3530
David Hildenbrand8ad35752014-03-14 11:00:21 +01003531 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3532 if (!ibs_enabled(vcpu)) {
3533 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003534 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003535 }
3536 goto retry;
3537 }
3538
3539 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3540 if (ibs_enabled(vcpu)) {
3541 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003542 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003543 }
3544 goto retry;
3545 }
3546
David Hildenbrand6502a342016-06-21 14:19:51 +02003547 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3548 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3549 goto retry;
3550 }
3551
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003552 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3553 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003554 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003555 * instruction manually, in order to provide additional
3556 * functionality needed for live migration.
3557 */
3558 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3559 goto retry;
3560 }
3561
3562 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3563 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003564 * Re-enable CMM virtualization if CMMA is available and
3565 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003566 */
3567 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003568 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003569 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3570 goto retry;
3571 }
3572
David Hildenbrand0759d062014-05-13 16:54:32 +02003573 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003574 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003575 /* we left the vsie handler, nothing to do, just clear the request */
3576 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003577
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003578 return 0;
3579}
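
/*
 * A sketch of the producer side of the request machinery consumed by
 * kvm_s390_handle_requests() above: kvm_make_request() marks the request
 * bit, and kvm_s390_sync_request() additionally kicks the vCPU out of SIE
 * so that the request loop runs before the next guest entry.
 */
#if 0
static void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	/* Picked up by the KVM_REQ_TLB_FLUSH branch before SIE entry. */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
#endif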
3580
David Hildenbrand0e7def52018-02-07 12:46:43 +01003581void kvm_s390_set_tod_clock(struct kvm *kvm,
3582 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003583{
3584 struct kvm_vcpu *vcpu;
3585 struct kvm_s390_tod_clock_ext htod;
3586 int i;
3587
3588 mutex_lock(&kvm->lock);
3589 preempt_disable();
3590
3591 get_tod_clock_ext((char *)&htod);
3592
3593 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003594 kvm->arch.epdx = 0;
3595 if (test_kvm_facility(kvm, 139)) {
3596 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3597 if (kvm->arch.epoch > gtod->tod)
3598 kvm->arch.epdx -= 1;
3599 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003600
3601 kvm_s390_vcpu_block_all(kvm);
3602 kvm_for_each_vcpu(i, vcpu, kvm) {
3603 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3604 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3605 }
3606
3607 kvm_s390_vcpu_unblock_all(kvm);
3608 preempt_enable();
3609 mutex_unlock(&kvm->lock);
3610}
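
/*
 * Worked example for the epoch arithmetic above (made-up numbers): with
 * htod.tod = 0xf000000000000000 and gtod->tod = 0x1000000000000000, the
 * 64-bit subtraction wraps and yields epoch = 0x2000000000000000, which
 * is larger than gtod->tod, so one is borrowed from the epoch index.
 * The guest then sees (htod.tod + epoch) mod 2^64 == gtod->tod, with the
 * epdx decrement compensating the carry into the extension word.
 */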
3611
Thomas Huthfa576c52014-05-06 17:20:16 +02003612/**
3613 * kvm_arch_fault_in_page - fault-in guest page if necessary
3614 * @vcpu: The corresponding virtual cpu
3615 * @gpa: Guest physical address
3616 * @writable: Whether the page should be writable or not
3617 *
3618 * Make sure that a guest page has been faulted-in on the host.
3619 *
3620 * Return: Zero on success, negative error code otherwise.
3621 */
3622long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003623{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003624 return gmap_fault(vcpu->arch.gmap, gpa,
3625 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003626}
3627
Dominik Dingel3c038e62013-10-07 17:11:48 +02003628static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3629 unsigned long token)
3630{
3631 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003632 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003633
3634 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003635 irq.u.ext.ext_params2 = token;
3636 irq.type = KVM_S390_INT_PFAULT_INIT;
3637 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003638 } else {
3639 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003640 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003641 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3642 }
3643}
3644
3645void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3646 struct kvm_async_pf *work)
3647{
3648 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3649 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3650}
3651
3652void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3653 struct kvm_async_pf *work)
3654{
3655 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3656 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3657}
3658
3659void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3660 struct kvm_async_pf *work)
3661{
3662 /* s390 will always inject the page directly */
3663}
3664
3665bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3666{
3667 /*
3668 * s390 will always inject the page directly,
3669 * but we still want check_async_completion to clean up
3670 */
3671 return true;
3672}
3673
3674static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3675{
3676 hva_t hva;
3677 struct kvm_arch_async_pf arch;
3678 int rc;
3679
3680 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3681 return 0;
3682 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3683 vcpu->arch.pfault_compare)
3684 return 0;
3685 if (psw_extint_disabled(vcpu))
3686 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003687 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003688 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003689 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003690 return 0;
3691 if (!vcpu->arch.gmap->pfault_enabled)
3692 return 0;
3693
Heiko Carstens81480cc2014-01-01 16:36:07 +01003694 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3695 hva += current->thread.gmap_addr & ~PAGE_MASK;
3696 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003697 return 0;
3698
3699 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3700 return rc;
3701}
3702
Thomas Huth3fb4c402013-09-12 10:33:43 +02003703static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003704{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003705 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003706
Dominik Dingel3c038e62013-10-07 17:11:48 +02003707 /*
3708 * On s390 notifications for arriving pages will be delivered directly
3709 * to the guest, but the housekeeping for completed pfaults is
3710 * handled outside the worker.
3711 */
3712 kvm_check_async_pf_completion(vcpu);
3713
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003714 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3715 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003716
3717 if (need_resched())
3718 schedule();
3719
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003720 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003721 s390_handle_mcck();
3722
Jens Freimann79395032014-04-17 10:10:30 +02003723 if (!kvm_is_ucontrol(vcpu->kvm)) {
3724 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3725 if (rc)
3726 return rc;
3727 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003728
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003729 rc = kvm_s390_handle_requests(vcpu);
3730 if (rc)
3731 return rc;
3732
David Hildenbrand27291e22014-01-23 12:26:52 +01003733 if (guestdbg_enabled(vcpu)) {
3734 kvm_s390_backup_guest_per_regs(vcpu);
3735 kvm_s390_patch_guest_per_regs(vcpu);
3736 }
3737
Michael Mueller9f30f622019-01-31 09:52:44 +01003738 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3739
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003740 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003741 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3742 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3743 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003744
Thomas Huth3fb4c402013-09-12 10:33:43 +02003745 return 0;
3746}
3747
Thomas Huth492d8642015-02-10 16:11:01 +01003748static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3749{
David Hildenbrand56317922016-01-12 17:37:58 +01003750 struct kvm_s390_pgm_info pgm_info = {
3751 .code = PGM_ADDRESSING,
3752 };
3753 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003754 int rc;
3755
3756 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3757 trace_kvm_s390_sie_fault(vcpu);
3758
3759 /*
3760 * We want to inject an addressing exception, which is defined as a
3761 * suppressing or terminating exception. However, since we came here
3762 * by a DAT access exception, which is nullifying, the PSW still
3763 * points to the faulting instruction. So we've got
3764 * to look up the current opcode to get the length of the instruction
3765 * to be able to forward the PSW.
3766 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003767 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003768 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003769 if (rc < 0) {
3770 return rc;
3771 } else if (rc) {
3772 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3773 * Forward by arbitrary ilc, injection will take care of
3774 * nullification if necessary.
3775 */
3776 pgm_info = vcpu->arch.pgm;
3777 ilen = 4;
3778 }
David Hildenbrand56317922016-01-12 17:37:58 +01003779 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3780 kvm_s390_forward_psw(vcpu, ilen);
3781 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003782}
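
/*
 * A sketch of the length decoding behind insn_length() above (the real
 * helper lives in the s390 headers): the two most significant bits of
 * the first opcode byte encode the instruction length.
 */
#if 0
static int example_insn_length(u8 opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;	/* 00 -> 2 bytes */
	case 1:
	case 2:
		return 4;	/* 01, 10 -> 4 bytes */
	default:
		return 6;	/* 11 -> 6 bytes */
	}
}
#endif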
3783
Thomas Huth3fb4c402013-09-12 10:33:43 +02003784static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3785{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003786 struct mcck_volatile_info *mcck_info;
3787 struct sie_page *sie_page;
3788
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003789 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3790 vcpu->arch.sie_block->icptcode);
3791 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3792
David Hildenbrand27291e22014-01-23 12:26:52 +01003793 if (guestdbg_enabled(vcpu))
3794 kvm_s390_restore_guest_per_regs(vcpu);
3795
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003796 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3797 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003798
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003799 if (exit_reason == -EINTR) {
3800 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3801 sie_page = container_of(vcpu->arch.sie_block,
3802 struct sie_page, sie_block);
3803 mcck_info = &sie_page->mcck_info;
3804 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3805 return 0;
3806 }
3807
David Hildenbrand71f116b2015-10-19 16:24:28 +02003808 if (vcpu->arch.sie_block->icptcode > 0) {
3809 int rc = kvm_handle_sie_intercept(vcpu);
3810
3811 if (rc != -EOPNOTSUPP)
3812 return rc;
3813 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3814 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3815 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3816 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3817 return -EREMOTE;
3818 } else if (exit_reason != -EFAULT) {
3819 vcpu->stat.exit_null++;
3820 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003821 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3822 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3823 vcpu->run->s390_ucontrol.trans_exc_code =
3824 current->thread.gmap_addr;
3825 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003826 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003827 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003828 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003829 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003830 if (kvm_arch_setup_async_pf(vcpu))
3831 return 0;
3832 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003833 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003834 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003835}
3836
3837static int __vcpu_run(struct kvm_vcpu *vcpu)
3838{
3839 int rc, exit_reason;
3840
Thomas Huth800c1062013-09-12 10:33:45 +02003841 /*
3842 * We try to hold kvm->srcu during most of vcpu_run (except when running
3843 * the guest), so that memslots (and other SRCU-protected data) remain valid
3844 */
3845 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3846
Thomas Hutha76ccff2013-09-12 10:33:44 +02003847 do {
3848 rc = vcpu_pre_run(vcpu);
3849 if (rc)
3850 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003851
Thomas Huth800c1062013-09-12 10:33:45 +02003852 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003853 /*
3854 * As PF_VCPU will be used in the fault handler, there must be
3855 * no uaccess between guest_enter and guest_exit.
3856 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003857 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003858 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003859 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003860 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003861 exit_reason = sie64a(vcpu->arch.sie_block,
3862 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003863 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003864 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003865 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003866 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003867 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003868
Thomas Hutha76ccff2013-09-12 10:33:44 +02003869 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003870 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003871
Thomas Huth800c1062013-09-12 10:33:45 +02003872 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003873 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003874}
3875
David Hildenbrandb028ee32014-07-17 10:47:43 +02003876static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3877{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003878 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003879 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003880
3881 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003882 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003883 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3884 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3885 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3886 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3887 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3888 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003889 /* some control register changes require a tlb flush */
3890 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003891 }
3892 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003893 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003894 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3895 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3896 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3897 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3898 }
3899 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3900 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3901 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3902 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003903 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3904 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003905 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003906 /*
3907 * If userspace sets the riccb (e.g. after migration) to a valid state,
3908 * we should enable RI here instead of doing the lazy enablement.
3909 */
3910 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003911 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003912 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003913 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003914 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003915 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003916 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003917 /*
3918 * If userspace sets the gscb (e.g. after migration) to non-zero,
3919 * we should enable GS here instead of doing the lazy enablement.
3920 */
3921 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3922 test_kvm_facility(vcpu->kvm, 133) &&
3923 gscb->gssm &&
3924 !vcpu->arch.gs_enabled) {
3925 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3926 vcpu->arch.sie_block->ecb |= ECB_GS;
3927 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3928 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003929 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003930 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3931 test_kvm_facility(vcpu->kvm, 82)) {
3932 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3933 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3934 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003935 save_access_regs(vcpu->arch.host_acrs);
3936 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003937 /* save host (userspace) fprs/vrs */
3938 save_fpu_regs();
3939 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3940 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3941 if (MACHINE_HAS_VX)
3942 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3943 else
3944 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3945 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3946 if (test_fp_ctl(current->thread.fpu.fpc))
3947 /* User space provided an invalid FPC, let's clear it */
3948 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003949 if (MACHINE_HAS_GS) {
3950 preempt_disable();
3951 __ctl_set_bit(2, 4);
3952 if (current->thread.gs_cb) {
3953 vcpu->arch.host_gscb = current->thread.gs_cb;
3954 save_gs_cb(vcpu->arch.host_gscb);
3955 }
3956 if (vcpu->arch.gs_enabled) {
3957 current->thread.gs_cb = (struct gs_cb *)
3958 &vcpu->run->s.regs.gscb;
3959 restore_gs_cb(current->thread.gs_cb);
3960 }
3961 preempt_enable();
3962 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003963 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003964
David Hildenbrandb028ee32014-07-17 10:47:43 +02003965 kvm_run->kvm_dirty_regs = 0;
3966}
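
/*
 * Illustrative sketch (not compiled in): how userspace feeds sync_regs()
 * above. Setting a KVM_SYNC_* bit in kvm_run->kvm_dirty_regs makes the
 * next KVM_RUN consume the corresponding block from the shared kvm_run
 * area; "run" is assumed to be the mmap()ed kvm_run structure.
 */
#if 0
static void example_set_prefix(struct kvm_run *run, int vcpu_fd, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
	ioctl(vcpu_fd, KVM_RUN, 0);	/* sync_regs() clears the dirty bits */
}
#endif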
3967
3968static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3969{
3970 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3971 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3972 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3973 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003974 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003975 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3976 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3977 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3978 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3979 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3980 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3981 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003982 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003983 save_access_regs(vcpu->run->s.regs.acrs);
3984 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003985 /* Save guest register state */
3986 save_fpu_regs();
3987 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3988 /* Restore will be done lazily at return */
3989 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3990 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003991 if (MACHINE_HAS_GS) {
3992 __ctl_set_bit(2, 4);
3993 if (vcpu->arch.gs_enabled)
3994 save_gs_cb(current->thread.gs_cb);
3995 preempt_disable();
3996 current->thread.gs_cb = vcpu->arch.host_gscb;
3997 restore_gs_cb(vcpu->arch.host_gscb);
3998 preempt_enable();
3999 if (!vcpu->arch.host_gscb)
4000 __ctl_clear_bit(2, 4);
4001 vcpu->arch.host_gscb = NULL;
4002 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004003 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004004}
4005
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004006int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4007{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004008 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004009
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004010 if (kvm_run->immediate_exit)
4011 return -EINTR;
4012
Christoffer Dallaccb7572017-12-04 21:35:25 +01004013 vcpu_load(vcpu);
4014
David Hildenbrand27291e22014-01-23 12:26:52 +01004015 if (guestdbg_exit_pending(vcpu)) {
4016 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004017 rc = 0;
4018 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004019 }
4020
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004021 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004022
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004023 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4024 kvm_s390_vcpu_start(vcpu);
4025 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004026 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004027 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004028 rc = -EINVAL;
4029 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004030 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004031
David Hildenbrandb028ee32014-07-17 10:47:43 +02004032 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004033 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004034
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004035 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004036 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004037
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004038 if (signal_pending(current) && !rc) {
4039 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004040 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004041 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004042
David Hildenbrand27291e22014-01-23 12:26:52 +01004043 if (guestdbg_exit_pending(vcpu) && !rc) {
4044 kvm_s390_prepare_debug_exit(vcpu);
4045 rc = 0;
4046 }
4047
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004048 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004049 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004050 rc = 0;
4051 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004052
David Hildenbranddb0758b2016-02-15 09:42:25 +01004053 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004054 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004055
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004056 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004057
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004058 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004059out:
4060 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004061 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004062}
4063
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004064/*
4065 * store status at address
4066 * we have two special cases:
4067 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4068 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4069 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004070int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004071{
Carsten Otte092670c2011-07-24 10:48:22 +02004072 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004073 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004074 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004075 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004076 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004077
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004078 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004079 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4080 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004081 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004082 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004083 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4084 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004085 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004086 gpa = px;
4087 } else
4088 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004089
4090 /* manually convert vector registers if necessary */
4091 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004092 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004093 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4094 fprs, 128);
4095 } else {
4096 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004097 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004098 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004099 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004100 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004101 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004102 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004103 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004104 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004105 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004106 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004107 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004108 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004109 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004110 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004111 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004112 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004113 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004114 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004115 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004116 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004117 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004118 &vcpu->arch.sie_block->gcr, 128);
4119 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004120}
4121
Thomas Huthe8798922013-11-06 15:46:33 +01004122int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4123{
4124 /*
4125 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004126 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004127 * them into the save area
4128 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004129 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004130 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004131 save_access_regs(vcpu->run->s.regs.acrs);
4132
4133 return kvm_s390_store_status_unloaded(vcpu, addr);
4134}
4135
David Hildenbrand8ad35752014-03-14 11:00:21 +01004136static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4137{
4138 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004139 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004140}
4141
4142static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4143{
4144 unsigned int i;
4145 struct kvm_vcpu *vcpu;
4146
4147 kvm_for_each_vcpu(i, vcpu, kvm) {
4148 __disable_ibs_on_vcpu(vcpu);
4149 }
4150}
4151
4152static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4153{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004154 if (!sclp.has_ibs)
4155 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004156 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004157 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004158}
4159
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004160void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4161{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004162 int i, online_vcpus, started_vcpus = 0;
4163
4164 if (!is_vcpu_stopped(vcpu))
4165 return;
4166
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004167 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004168 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004169 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004170 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4171
4172 for (i = 0; i < online_vcpus; i++) {
4173 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4174 started_vcpus++;
4175 }
4176
4177 if (started_vcpus == 0) {
4178 /* we're the only active VCPU -> speed it up */
4179 __enable_ibs_on_vcpu(vcpu);
4180 } else if (started_vcpus == 1) {
4181 /*
4182 * As we are starting a second VCPU, we have to disable
4183 * the IBS facility on all VCPUs to remove potentially
4184 * outstanding ENABLE requests.
4185 */
4186 __disable_ibs_on_all_vcpus(vcpu->kvm);
4187 }
4188
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004189 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004190 /*
4191 * Another VCPU might have used IBS while we were offline.
4192 * Let's play safe and flush the VCPU at startup.
4193 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004194 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004195 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004196 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004197}
4198
4199void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4200{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004201 int i, online_vcpus, started_vcpus = 0;
4202 struct kvm_vcpu *started_vcpu = NULL;
4203
4204 if (is_vcpu_stopped(vcpu))
4205 return;
4206
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004207 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004208 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004209 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004210 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4211
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004212 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004213 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004214
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004215 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004216 __disable_ibs_on_vcpu(vcpu);
4217
4218 for (i = 0; i < online_vcpus; i++) {
4219 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4220 started_vcpus++;
4221 started_vcpu = vcpu->kvm->vcpus[i];
4222 }
4223 }
4224
4225 if (started_vcpus == 1) {
4226 /*
4227 * As we only have one VCPU left, we want to enable the
4228 * IBS facility for that VCPU to speed it up.
4229 */
4230 __enable_ibs_on_vcpu(started_vcpu);
4231 }
4232
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004233 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004234 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004235}
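
/*
 * The IBS juggling in the start/stop paths above amounts to a small
 * state machine (a summary, not new behavior):
 *
 *   0 -> 1 running vCPUs: enable IBS on the single runner (speed-up)
 *   1 -> 2 running vCPUs: disable IBS on all vCPUs, since ENABLE
 *                         requests might still be outstanding
 *   2 -> 1 running vCPUs: re-enable IBS on the last remaining runner
 */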
4236
Cornelia Huckd6712df2012-12-20 15:32:11 +01004237static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4238 struct kvm_enable_cap *cap)
4239{
4240 int r;
4241
4242 if (cap->flags)
4243 return -EINVAL;
4244
4245 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004246 case KVM_CAP_S390_CSS_SUPPORT:
4247 if (!vcpu->kvm->arch.css_support) {
4248 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004249 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004250 trace_kvm_s390_enable_css(vcpu->kvm);
4251 }
4252 r = 0;
4253 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004254 default:
4255 r = -EINVAL;
4256 break;
4257 }
4258 return r;
4259}
4260
Thomas Huth41408c282015-02-06 15:01:21 +01004261static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4262 struct kvm_s390_mem_op *mop)
4263{
4264 void __user *uaddr = (void __user *)mop->buf;
4265 void *tmpbuf = NULL;
4266 int r, srcu_idx;
4267 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4268 | KVM_S390_MEMOP_F_CHECK_ONLY;
4269
4270 if (mop->flags & ~supported_flags)
4271 return -EINVAL;
4272
4273 if (mop->size > MEM_OP_MAX_SIZE)
4274 return -E2BIG;
4275
4276 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4277 tmpbuf = vmalloc(mop->size);
4278 if (!tmpbuf)
4279 return -ENOMEM;
4280 }
4281
4282 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4283
4284 switch (mop->op) {
4285 case KVM_S390_MEMOP_LOGICAL_READ:
4286 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004287 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4288 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004289 break;
4290 }
4291 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4292 if (r == 0) {
4293 if (copy_to_user(uaddr, tmpbuf, mop->size))
4294 r = -EFAULT;
4295 }
4296 break;
4297 case KVM_S390_MEMOP_LOGICAL_WRITE:
4298 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004299 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4300 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004301 break;
4302 }
4303 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4304 r = -EFAULT;
4305 break;
4306 }
4307 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4308 break;
4309 default:
4310 r = -EINVAL;
4311 }
4312
4313 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4314
4315 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4316 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4317
4318 vfree(tmpbuf);
4319 return r;
4320}
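
/*
 * Illustrative sketch (not compiled in): a guest memory read through the
 * KVM_S390_MEM_OP ioctl handled above. struct kvm_s390_mem_op is the
 * regular KVM uapi; "vcpu_fd" is assumed to come from KVM_CREATE_VCPU and
 * error handling is elided.
 */
#if 0
static int example_read_guest(int vcpu_fd, __u64 gaddr, void *buf, __u32 size)
{
	struct kvm_s390_mem_op mem_op = {
		.gaddr	= gaddr,			/* logical guest address */
		.size	= size,				/* <= MEM_OP_MAX_SIZE */
		.buf	= (__u64)(unsigned long)buf,
		.op	= KVM_S390_MEMOP_LOGICAL_READ,
		.ar	= 0,				/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mem_op);
}
#endif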
4321
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004322long kvm_arch_vcpu_async_ioctl(struct file *filp,
4323 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004324{
4325 struct kvm_vcpu *vcpu = filp->private_data;
4326 void __user *argp = (void __user *)arg;
4327
Avi Kivity93736622010-05-13 12:35:17 +03004328 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004329 case KVM_S390_IRQ: {
4330 struct kvm_s390_irq s390irq;
4331
Jens Freimann47b43c52014-11-11 20:57:06 +01004332 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004333 return -EFAULT;
4334 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004335 }
Avi Kivity93736622010-05-13 12:35:17 +03004336 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004337 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02004338 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01004339
4340 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004341 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004342 if (s390int_to_s390irq(&s390int, &s390irq))
4343 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004344 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004345 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004346 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004347 return -ENOIOCTLCMD;
4348}
4349
4350long kvm_arch_vcpu_ioctl(struct file *filp,
4351 unsigned int ioctl, unsigned long arg)
4352{
4353 struct kvm_vcpu *vcpu = filp->private_data;
4354 void __user *argp = (void __user *)arg;
4355 int idx;
4356 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004357
4358 vcpu_load(vcpu);
4359
4360 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004361 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004362 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004363 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004364 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004365 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004366 case KVM_S390_SET_INITIAL_PSW: {
4367 psw_t psw;
4368
Avi Kivitybc923cc2010-05-13 12:21:46 +03004369 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004370 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004371 break;
4372 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4373 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004374 }
4375 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03004376 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4377 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004378 case KVM_SET_ONE_REG:
4379 case KVM_GET_ONE_REG: {
4380 struct kvm_one_reg reg;
4381 r = -EFAULT;
4382 if (copy_from_user(&reg, argp, sizeof(reg)))
4383 break;
4384 if (ioctl == KVM_SET_ONE_REG)
4385 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4386 else
4387 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4388 break;
4389 }
Carsten Otte27e03932012-01-04 10:25:21 +01004390#ifdef CONFIG_KVM_S390_UCONTROL
4391 case KVM_S390_UCAS_MAP: {
4392 struct kvm_s390_ucas_mapping ucasmap;
4393
4394 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4395 r = -EFAULT;
4396 break;
4397 }
4398
4399 if (!kvm_is_ucontrol(vcpu->kvm)) {
4400 r = -EINVAL;
4401 break;
4402 }
4403
4404 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4405 ucasmap.vcpu_addr, ucasmap.length);
4406 break;
4407 }
4408 case KVM_S390_UCAS_UNMAP: {
4409 struct kvm_s390_ucas_mapping ucasmap;
4410
4411 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4412 r = -EFAULT;
4413 break;
4414 }
4415
4416 if (!kvm_is_ucontrol(vcpu->kvm)) {
4417 r = -EINVAL;
4418 break;
4419 }
4420
4421 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4422 ucasmap.length);
4423 break;
4424 }
4425#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004426 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004427 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004428 break;
4429 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004430 case KVM_ENABLE_CAP:
4431 {
4432 struct kvm_enable_cap cap;
4433 r = -EFAULT;
4434 if (copy_from_user(&cap, argp, sizeof(cap)))
4435 break;
4436 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4437 break;
4438 }
Thomas Huth41408c282015-02-06 15:01:21 +01004439 case KVM_S390_MEM_OP: {
4440 struct kvm_s390_mem_op mem_op;
4441
4442 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4443 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4444 else
4445 r = -EFAULT;
4446 break;
4447 }
Jens Freimann816c7662014-11-24 17:13:46 +01004448 case KVM_S390_SET_IRQ_STATE: {
4449 struct kvm_s390_irq_state irq_state;
4450
4451 r = -EFAULT;
4452 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4453 break;
4454 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4455 irq_state.len == 0 ||
4456 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4457 r = -EINVAL;
4458 break;
4459 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004460 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004461 r = kvm_s390_set_irq_state(vcpu,
4462 (void __user *) irq_state.buf,
4463 irq_state.len);
4464 break;
4465 }
4466 case KVM_S390_GET_IRQ_STATE: {
4467 struct kvm_s390_irq_state irq_state;
4468
4469 r = -EFAULT;
4470 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4471 break;
4472 if (irq_state.len == 0) {
4473 r = -EINVAL;
4474 break;
4475 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004476 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004477 r = kvm_s390_get_irq_state(vcpu,
4478 (__u8 __user *) irq_state.buf,
4479 irq_state.len);
4480 break;
4481 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004482 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004483 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004484 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004485
4486 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004487 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004488}
4489
Souptick Joarder1499fa82018-04-19 00:49:58 +05304490vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004491{
4492#ifdef CONFIG_KVM_S390_UCONTROL
4493 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4494 && (kvm_is_ucontrol(vcpu->kvm))) {
4495 vmf->page = virt_to_page(vcpu->arch.sie_block);
4496 get_page(vmf->page);
4497 return 0;
4498 }
4499#endif
4500 return VM_FAULT_SIGBUS;
4501}
4502
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304503int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4504 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004505{
4506 return 0;
4507}
4508
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004509/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004510int kvm_arch_prepare_memory_region(struct kvm *kvm,
4511 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004512 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004513 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004514{
Nick Wangdd2887e2013-03-25 17:22:57 +01004515 /* A few sanity checks. Memory slots have to start and end on a segment
4516 boundary (1 MB). The memory in userland may be fragmented into various
4517 different vmas, and it is okay to mmap() and munmap() parts of this
4518 slot at any time after this call */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004519
Carsten Otte598841c2011-07-24 10:48:21 +02004520 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004521 return -EINVAL;
4522
Carsten Otte598841c2011-07-24 10:48:21 +02004523 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004524 return -EINVAL;
4525
Dominik Dingela3a92c32014-12-01 17:24:42 +01004526 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4527 return -EINVAL;
4528
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004529 return 0;
4530}
4531
4532void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004533 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004534 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004535 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004536 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004537{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004538 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004539
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004540 switch (change) {
4541 case KVM_MR_DELETE:
4542 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4543 old->npages * PAGE_SIZE);
4544 break;
4545 case KVM_MR_MOVE:
4546 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4547 old->npages * PAGE_SIZE);
4548 if (rc)
4549 break;
4550 /* FALLTHROUGH */
4551 case KVM_MR_CREATE:
4552 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4553 mem->guest_phys_addr, mem->memory_size);
4554 break;
4555 case KVM_MR_FLAGS_ONLY:
4556 break;
4557 default:
4558 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4559 }
Carsten Otte598841c2011-07-24 10:48:21 +02004560 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004561 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004562 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004563}
4564
Alexander Yarygin60a37702016-04-01 15:38:57 +03004565static inline unsigned long nonhyp_mask(int i)
4566{
4567 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4568
4569 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4570}
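
/*
 * Worked example for nonhyp_mask() above: for facility word i, the 2-bit
 * field extracted from sclp.hmfai selects how much of that word survives.
 * With nonhyp_fai == 1 the mask is 0x0000ffffffffffffUL >> 16 ==
 * 0x00000000ffffffffUL, so only those facility bits make it through the
 * "&" in kvm_s390_init() below.
 */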
4571
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004572void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4573{
4574 vcpu->valid_wakeup = false;
4575}
4576
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004577static int __init kvm_s390_init(void)
4578{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004579 int i;
4580
David Hildenbrand07197fd2015-01-30 16:01:38 +01004581 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004582 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004583 return -ENODEV;
4584 }
4585
Janosch Franka4499382018-07-13 11:28:31 +01004586 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004587 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004588 return -EINVAL;
4589 }
4590
Alexander Yarygin60a37702016-04-01 15:38:57 +03004591 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004592 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004593 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4594
Michael Mueller9d8d5782015-02-02 15:42:51 +01004595 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004596}
4597
4598static void __exit kvm_s390_exit(void)
4599{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004600 kvm_exit();
4601}
4602
4603module_init(kvm_s390_init);
4604module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004605
4606/*
4607 * Enable autoloading of the kvm module.
4608 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4609 * since x86 takes a different approach.
4610 */
4611#include <linux/miscdevice.h>
4612MODULE_ALIAS_MISCDEV(KVM_MINOR);
4613MODULE_ALIAS("devname:kvm");