// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

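/*
 * Added note: this mirrors the 16-byte value stored by STORE CLOCK
 * EXTENDED (STCKE) - one epoch-index byte, the 64 most significant
 * TOD-clock bits, and the remaining clock bytes; cf. get_tod_clock_ext().
 */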
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
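	/*
	 * Added note: with the multiple-epoch facility the epoch index
	 * extends the clock; propagate the carry/borrow of the 64-bit
	 * addition above into it.
	 */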
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

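/*
 * Added note: PERFORM LOCKED OPERATION with bit 0x100 set in the GR0
 * function code acts as a "test bit" query - condition code 0 reports
 * that function nr is available, and no operands are accessed.
 */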
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}

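/* Added note: RRF-format opcodes, fed to __insn32_query() above */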
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

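/*
 * Added note: request an intercept of operation exceptions on every vcpu,
 * e.g. so that instruction 0x0000 reaches user space once
 * KVM_CAP_S390_USER_INSTR0 is enabled below.
 */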
David Hildenbrand6502a342016-06-21 14:19:51 +0200648static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
649{
650 unsigned int i;
651 struct kvm_vcpu *vcpu;
652
653 kvm_for_each_vcpu(i, vcpu, kvm) {
654 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
655 }
656}
657
Paolo Bonzinie5d83c72017-02-16 10:40:56 +0100658int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
Cornelia Huckd938dc52013-10-23 18:26:34 +0200659{
660 int r;
661
662 if (cap->flags)
663 return -EINVAL;
664
665 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200666 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200667 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200668 kvm->arch.use_irqchip = 1;
669 r = 0;
670 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200671 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200672 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200673 kvm->arch.user_sigp = 1;
674 r = 0;
675 break;
Eric Farman68c55752014-06-09 10:57:26 -0400676 case KVM_CAP_S390_VECTOR_REGISTERS:
David Hildenbrand5967c172015-11-06 12:08:48 +0100677 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200678 if (kvm->created_vcpus) {
David Hildenbrand5967c172015-11-06 12:08:48 +0100679 r = -EBUSY;
680 } else if (MACHINE_HAS_VX) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100681 set_kvm_facility(kvm->arch.model.fac_mask, 129);
682 set_kvm_facility(kvm->arch.model.fac_list, 129);
Guenther Hutzl2f87d942016-06-03 14:37:17 +0200683 if (test_facility(134)) {
684 set_kvm_facility(kvm->arch.model.fac_mask, 134);
685 set_kvm_facility(kvm->arch.model.fac_list, 134);
686 }
Maxim Samoylov53743aa2016-02-10 10:31:23 +0100687 if (test_facility(135)) {
688 set_kvm_facility(kvm->arch.model.fac_mask, 135);
689 set_kvm_facility(kvm->arch.model.fac_list, 135);
690 }
Christian Borntraeger7832e912018-12-28 09:43:37 +0100691 if (test_facility(148)) {
692 set_kvm_facility(kvm->arch.model.fac_mask, 148);
693 set_kvm_facility(kvm->arch.model.fac_list, 148);
694 }
Christian Borntraegerd5cb6ab2018-12-28 09:45:58 +0100695 if (test_facility(152)) {
696 set_kvm_facility(kvm->arch.model.fac_mask, 152);
697 set_kvm_facility(kvm->arch.model.fac_list, 152);
698 }
Michael Mueller18280d82015-03-16 16:05:41 +0100699 r = 0;
700 } else
701 r = -EINVAL;
David Hildenbrand5967c172015-11-06 12:08:48 +0100702 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200703 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
704 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400705 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800706 case KVM_CAP_S390_RI:
707 r = -EINVAL;
708 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200709 if (kvm->created_vcpus) {
Fan Zhangc6e5f162016-01-07 18:24:29 +0800710 r = -EBUSY;
711 } else if (test_facility(64)) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100712 set_kvm_facility(kvm->arch.model.fac_mask, 64);
713 set_kvm_facility(kvm->arch.model.fac_list, 64);
Fan Zhangc6e5f162016-01-07 18:24:29 +0800714 r = 0;
715 }
716 mutex_unlock(&kvm->lock);
717 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
718 r ? "(not available)" : "(success)");
719 break;
Yi Min Zhao47a46932017-03-10 09:29:38 +0100720 case KVM_CAP_S390_AIS:
721 mutex_lock(&kvm->lock);
722 if (kvm->created_vcpus) {
723 r = -EBUSY;
724 } else {
725 set_kvm_facility(kvm->arch.model.fac_mask, 72);
726 set_kvm_facility(kvm->arch.model.fac_list, 72);
Yi Min Zhao47a46932017-03-10 09:29:38 +0100727 r = 0;
728 }
729 mutex_unlock(&kvm->lock);
730 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
731 r ? "(not available)" : "(success)");
732 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100733 case KVM_CAP_S390_GS:
734 r = -EINVAL;
735 mutex_lock(&kvm->lock);
Christian Borntraeger241e3ec02017-11-16 15:12:52 +0100736 if (kvm->created_vcpus) {
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100737 r = -EBUSY;
738 } else if (test_facility(133)) {
739 set_kvm_facility(kvm->arch.model.fac_mask, 133);
740 set_kvm_facility(kvm->arch.model.fac_list, 133);
741 r = 0;
742 }
743 mutex_unlock(&kvm->lock);
744 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
745 r ? "(not available)" : "(success)");
746 break;
Janosch Franka4499382018-07-13 11:28:31 +0100747 case KVM_CAP_S390_HPAGE_1M:
748 mutex_lock(&kvm->lock);
749 if (kvm->created_vcpus)
750 r = -EBUSY;
Janosch Frank40ebdb82018-08-01 11:48:28 +0100751 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
Janosch Franka4499382018-07-13 11:28:31 +0100752 r = -EINVAL;
753 else {
754 r = 0;
Janosch Frankdf88f312018-08-30 16:14:18 +0200755 down_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100756 kvm->mm->context.allow_gmap_hpage_1m = 1;
Janosch Frankdf88f312018-08-30 16:14:18 +0200757 up_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100758 /*
759 * We might have to create fake 4k page
760 * tables. To avoid that the hardware works on
761 * stale PGSTEs, we emulate these instructions.
762 */
763 kvm->arch.use_skf = 0;
764 kvm->arch.use_pfmfi = 0;
765 }
766 mutex_unlock(&kvm->lock);
767 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
768 r ? "(not available)" : "(success)");
769 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100770 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200771 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100772 kvm->arch.user_stsi = 1;
773 r = 0;
774 break;
David Hildenbrand6502a342016-06-21 14:19:51 +0200775 case KVM_CAP_S390_USER_INSTR0:
776 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
777 kvm->arch.user_instr0 = 1;
778 icpt_operexc_on_all_vcpus(kvm);
779 r = 0;
780 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200781 default:
782 r = -EINVAL;
783 break;
784 }
785 return r;
786}
787
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100788static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
789{
790 int ret;
791
792 switch (attr->attr) {
793 case KVM_S390_VM_MEM_LIMIT_SIZE:
794 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200795 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
Dominik Dingela3a92c32014-12-01 17:24:42 +0100796 kvm->arch.mem_limit);
797 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100798 ret = -EFAULT;
799 break;
800 default:
801 ret = -ENXIO;
802 break;
803 }
804 return ret;
805}
806
807static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200808{
809 int ret;
810 unsigned int idx;
811 switch (attr->attr) {
812 case KVM_S390_VM_MEM_ENABLE_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100813 ret = -ENXIO;
David Hildenbrandc24cc9c2015-11-24 13:53:04 +0100814 if (!sclp.has_cmma)
Dominik Dingele6db1d62015-05-07 15:41:57 +0200815 break;
816
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200817 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200818 mutex_lock(&kvm->lock);
Janosch Franka4499382018-07-13 11:28:31 +0100819 if (kvm->created_vcpus)
820 ret = -EBUSY;
821 else if (kvm->mm->context.allow_gmap_hpage_1m)
822 ret = -EINVAL;
823 else {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200824 kvm->arch.use_cmma = 1;
Janosch Frankc9f0a2b2018-02-16 12:16:14 +0100825 /* Not compatible with cmma. */
826 kvm->arch.use_pfmfi = 0;
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200827 ret = 0;
828 }
829 mutex_unlock(&kvm->lock);
830 break;
831 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100832 ret = -ENXIO;
833 if (!sclp.has_cmma)
834 break;
Dominik Dingelc3489152015-06-18 13:17:11 +0200835 ret = -EINVAL;
836 if (!kvm->arch.use_cmma)
837 break;
838
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200839 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200840 mutex_lock(&kvm->lock);
841 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200842 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200843 srcu_read_unlock(&kvm->srcu, idx);
844 mutex_unlock(&kvm->lock);
845 ret = 0;
846 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100847 case KVM_S390_VM_MEM_LIMIT_SIZE: {
848 unsigned long new_limit;
849
850 if (kvm_is_ucontrol(kvm))
851 return -EINVAL;
852
853 if (get_user(new_limit, (u64 __user *)attr->addr))
854 return -EFAULT;
855
Dominik Dingela3a92c32014-12-01 17:24:42 +0100856 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
857 new_limit > kvm->arch.mem_limit)
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100858 return -E2BIG;
859
Dominik Dingela3a92c32014-12-01 17:24:42 +0100860 if (!new_limit)
861 return -EINVAL;
862
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100863 /* gmap_create takes last usable address */
Dominik Dingela3a92c32014-12-01 17:24:42 +0100864 if (new_limit != KVM_S390_NO_MEM_LIMIT)
865 new_limit -= 1;
866
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100867 ret = -EBUSY;
868 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200869 if (!kvm->created_vcpus) {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100870 /* gmap_create will round the limit up */
871 struct gmap *new = gmap_create(current->mm, new_limit);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100872
873 if (!new) {
874 ret = -ENOMEM;
875 } else {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100876 gmap_remove(kvm->arch.gmap);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100877 new->private = kvm;
878 kvm->arch.gmap = new;
879 ret = 0;
880 }
881 }
882 mutex_unlock(&kvm->lock);
Dominik Dingela3a92c32014-12-01 17:24:42 +0100883 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
884 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
885 (void *) kvm->arch.gmap->asce);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100886 break;
887 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200888 default:
889 ret = -ENXIO;
890 break;
891 }
892 return ret;
893}
894
Tony Krowiaka374e892014-09-03 10:13:53 +0200895static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
896
Tony Krowiak20c922f2018-04-22 11:37:03 -0400897void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
Tony Krowiaka374e892014-09-03 10:13:53 +0200898{
899 struct kvm_vcpu *vcpu;
900 int i;
901
Tony Krowiak20c922f2018-04-22 11:37:03 -0400902 kvm_s390_vcpu_block_all(kvm);
903
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400904 kvm_for_each_vcpu(i, vcpu, kvm) {
Tony Krowiak20c922f2018-04-22 11:37:03 -0400905 kvm_s390_vcpu_crypto_setup(vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400906 /* recreate the shadow crycb by leaving the VSIE handler */
907 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
908 }
Tony Krowiak20c922f2018-04-22 11:37:03 -0400909
910 kvm_s390_vcpu_unblock_all(kvm);
911}
912
913static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
914{
Tony Krowiaka374e892014-09-03 10:13:53 +0200915 mutex_lock(&kvm->lock);
916 switch (attr->attr) {
917 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200918 if (!test_kvm_facility(kvm, 76)) {
919 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400920 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200921 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200922 get_random_bytes(
923 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
924 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
925 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200926 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200927 break;
928 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200929 if (!test_kvm_facility(kvm, 76)) {
930 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400931 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200932 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200933 get_random_bytes(
934 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
935 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
936 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200937 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200938 break;
939 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200940 if (!test_kvm_facility(kvm, 76)) {
941 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400942 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200943 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200944 kvm->arch.crypto.aes_kw = 0;
945 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
946 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200947 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200948 break;
949 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200950 if (!test_kvm_facility(kvm, 76)) {
951 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400952 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200953 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200954 kvm->arch.crypto.dea_kw = 0;
955 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
956 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200957 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200958 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -0400959 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
960 if (!ap_instructions_available()) {
961 mutex_unlock(&kvm->lock);
962 return -EOPNOTSUPP;
963 }
964 kvm->arch.crypto.apie = 1;
965 break;
966 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
967 if (!ap_instructions_available()) {
968 mutex_unlock(&kvm->lock);
969 return -EOPNOTSUPP;
970 }
971 kvm->arch.crypto.apie = 0;
972 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200973 default:
974 mutex_unlock(&kvm->lock);
975 return -ENXIO;
976 }
977
Tony Krowiak20c922f2018-04-22 11:37:03 -0400978 kvm_s390_vcpu_crypto_reset_all(kvm);
Tony Krowiaka374e892014-09-03 10:13:53 +0200979 mutex_unlock(&kvm->lock);
980 return 0;
981}
982
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200983static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
984{
985 int cx;
986 struct kvm_vcpu *vcpu;
987
988 kvm_for_each_vcpu(cx, vcpu, kvm)
989 kvm_s390_sync_request(req, vcpu);
990}
991
992/*
993 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100994 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200995 */
996static int kvm_s390_vm_start_migration(struct kvm *kvm)
997{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200998 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200999 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001000 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001001 int slotnr;
1002
1003 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001004 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001005 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001006 slots = kvm_memslots(kvm);
1007 if (!slots || !slots->used_slots)
1008 return -EINVAL;
1009
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001010 if (!kvm->arch.use_cmma) {
1011 kvm->arch.migration_mode = 1;
1012 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001013 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001014 /* mark all the pages in active slots as dirty */
1015 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1016 ms = slots->memslots + slotnr;
Igor Mammedov13a17cc2019-09-11 03:52:18 -04001017 if (!ms->dirty_bitmap)
1018 return -EINVAL;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001019 /*
1020 * The second half of the bitmap is only used on x86,
1021 * and would be wasted otherwise, so we put it to good
1022 * use here to keep track of the state of the storage
1023 * attributes.
1024 */
1025 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1026 ram_pages += ms->npages;
1027 }
1028 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1029 kvm->arch.migration_mode = 1;
1030 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001031 return 0;
1032}
1033
1034/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001035 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001036 * kvm_s390_vm_start_migration.
1037 */
1038static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1039{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001040 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001041 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001042 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001043 kvm->arch.migration_mode = 0;
1044 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001045 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001046 return 0;
1047}
1048
1049static int kvm_s390_vm_set_migration(struct kvm *kvm,
1050 struct kvm_device_attr *attr)
1051{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001052 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001053
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001054 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001055 switch (attr->attr) {
1056 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001057 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001058 break;
1059 case KVM_S390_VM_MIGRATION_STOP:
1060 res = kvm_s390_vm_stop_migration(kvm);
1061 break;
1062 default:
1063 break;
1064 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001065 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001066
1067 return res;
1068}
1069
1070static int kvm_s390_vm_get_migration(struct kvm *kvm,
1071 struct kvm_device_attr *attr)
1072{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001073 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001074
1075 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1076 return -ENXIO;
1077
1078 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1079 return -EFAULT;
1080 return 0;
1081}
1082
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

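/*
 * Compute the guest view of the TOD clock from the host TOD and the
 * per-VM epoch (plus the epoch index when facility 139 is available).
 * Preemption is disabled so the host clock and epoch values are read
 * consistently; the epoch index is incremented when adding the epoch
 * wrapped the 64-bit TOD value.
 */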
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

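/*
 * Set the guest CPU model (cpuid, IBC and facility list). Only allowed
 * before the first VCPU is created. The requested IBC value is clamped
 * to the range the underlying hardware reports via SCLP.
 */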
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

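/*
 * Set the guest's instruction subfunction bitmaps (query results for
 * PLO, the crypto subfunctions and friends). Only allowed before the
 * first VCPU is created; the remainder of the function merely traces
 * the newly installed values.
 */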
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

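/*
 * Read the guest's storage keys into a userspace buffer. Returns
 * KVM_S390_GET_SKEYS_NONE if the guest is not using storage keys at
 * all, and -EINVAL for a zero or over-sized request.
 */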
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

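/*
 * Write storage keys supplied by userspace for a range of guest frames.
 * Storage key handling is enabled for the guest on first use; a fault
 * on a not-yet-populated mapping is resolved via fixup_user_fault()
 * and setting the affected key is retried.
 */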
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

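/*
 * Read the CMMA page states (pgste bits) for a contiguous range of
 * guest frames without consulting the dirty bitmap. Copying stops at
 * the first invalid address; this is only an error if nothing was
 * copied at all.
 */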
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

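/*
 * Find the guest frame number of the next set bit in the CMMA dirty
 * bitmap, starting the search at cur_gfn and walking across memslot
 * boundaries (wrapping around at the highest slot).
 */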
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

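/*
 * Dispatcher for the s390 specific VM ioctls, including the device
 * attribute interface and the storage key and CMMA log helpers above.
 *
 * Illustrative userspace use of the CMMA log (buffer management is up
 * to the caller; counts are capped at KVM_S390_CMMA_SIZE_MAX):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = sizeof(buf),
 *		.flags = KVM_S390_CMMA_PEEK,
 *		.values = (__u64)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 */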
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

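/*
 * Query whether the AP extended addressing (APXA) facility is
 * installed, using the AP query configuration information (QCI)
 * function when the AP instructions are available.
 */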
static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

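/*
 * Install the APCB masks (apm, aqm, adm) into the guest's CRYCB. All
 * VCPUs are blocked while the masks are updated and are then requested
 * to recreate their shadow CRYCBs.
 */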
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

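/*
 * Derive the initial guest cpuid from the host cpuid, with the version
 * field forced to 0xff.
 */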
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

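/*
 * Free the system control area (SCA), using the allocation size that
 * matches the format (extended or basic) currently in use.
 */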
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

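/*
 * Free all per-VCPU resources: the SCA entry (non-ucontrol), the private
 * gmap (ucontrol), the CMMA buffer and the SIE control block itself.
 */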
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kvm_s390_gisa_destroy(kvm);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

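/*
 * The SCA (system control area) carries one entry per VCPU that the SIE
 * instruction consults, e.g. for SIGP interpretation. Taking the lock for
 * reading suffices here; the write lock is only held while switching from
 * the basic to the extended SCA.
 */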
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

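/*
 * Publish a VCPU in the SCA: store its SIE block address in the per-CPU
 * slot and point the SIE block back at the SCA (scaoh/scaol). Without SCA
 * entries, only the SCA origin is set, since the basic SCA is still
 * needed for the ipte control.
 */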
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

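/*
 * Replace the basic SCA (up to 64 VCPUs) with an extended SCA (up to 248)
 * at runtime: all VCPUs are blocked and the SCA lock is taken for writing
 * while the new block is populated and every SIE block is repointed.
 */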
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

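/* Check whether a VCPU id still fits, switching to the ESCA on demand. */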
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

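/*
 * First stage of VCPU initialization: announce which register sets can be
 * synchronized through kvm_run (depending on the configured facilities)
 * and create the private gmap for ucontrol VMs.
 */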
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

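/*
 * vcpu_load/vcpu_put: switch the host to/from the VCPU's gmap, set or
 * clear CPUSTAT_RUNNING and start/stop CPU timer accounting while the
 * VCPU is loaded and not idle.
 */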
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
					CR0_INTERRUPT_KEY_SUBMASK |
					CR0_MEASUREMENT_ALERT_SUBMASK;
	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
					CR14_UNUSED_33 |
					CR14_EXTERNAL_DAMAGE_SUBMASK;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

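/*
 * Runs once the VCPU fd is accessible: propagate the VM-wide TOD epoch
 * (and epoch index) into the SIE block, hook the VCPU into the shared
 * gmap and the SCA, and enable operation exception interpretation when
 * STHYI is emulated or userspace handles instruction 0x0000.
 */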
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

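/*
 * A PCKMO subfunction is only usable if it is offered by both the guest
 * CPU model and the host; the query bits are numbered MSB first, hence
 * test_bit_inv().
 */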
static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

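/*
 * Second stage of VCPU initialization: populate the SIE control block
 * with the CPU model (IBC, facility list) and with every interpretation
 * facility the host offers (SIIF, SIGPIF, vector, GISA-based interrupt
 * delivery, ...), then set up CMMA and crypto state.
 */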
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

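/*
 * Allocate the VCPU and its SIE control block - which must be exactly one
 * page, see the BUILD_BUG_ON() - and link it to the GISA, if present. The
 * bulk of the setup happens later, in kvm_arch_vcpu_setup() and
 * kvm_arch_vcpu_postcreate().
 */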
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

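/*
 * The block/request bits in prog20 keep a VCPU from (re-)entering SIE;
 * exit_sie() below additionally kicks it out of a SIE run in progress.
 */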
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

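/*
 * Invalidation callback of the gmap: if the invalidated range overlaps a
 * VCPU's (two-page) prefix area, request a reload so that the notifier is
 * re-armed and the prefix pages are mapped back in. Shadow gmaps are
 * ignored here; the vsie code maintains its own notifier.
 */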
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

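/*
 * ONE_REG accessors for s390-specific register state (TOD programmable
 * register, epoch difference, CPU timer, clock comparator, pfault state,
 * program parameter, breaking-event address).
 */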
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

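/*
 * Process all pending VCPU requests before (re-)entering SIE. Every
 * handled request restarts the loop, as handling it may race with new
 * requests being made.
 */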
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

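/*
 * Illustrative sketch (not part of this file): the requests handled
 * above are raised from other contexts roughly like this, assuming a
 * valid vcpu pointer. kvm_s390_sync_request(), used by the IBS helpers
 * further down, additionally forces the vcpu out of SIE so the request
 * is seen promptly on the next pass through vcpu_pre_run():
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);	  // lazy variant
 *	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);  // with kick
 */
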
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

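/*
 * Worked example (illustration only): with the multiple-epoch facility
 * (139), the 8-bit epoch index extends the 64-bit epoch. The epoch is
 * computed modulo 2^64, so a borrow out of gtod->tod - htod.tod has to
 * be propagated into the index; the subtraction wrapped exactly when
 * the result is larger than the minuend, e.g.:
 *
 *	htod.tod  = 0xffff000000000000
 *	gtod->tod = 0x0000000000001000
 *	epoch     = gtod->tod - htod.tod = 0x0001000000001000 (wrapped)
 *
 * Here epoch > gtod->tod, so one is borrowed from the index (epdx).
 */
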
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly, but we still
	 * want check_async_completion to clean up.
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390, notifications for arriving pages will be delivered
	 * directly to the guest, but the housekeeping for completed
	 * pfaults is handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/*
		 * Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}

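/*
 * Illustrative userspace sketch (not part of this file): the dirty-regs
 * protocol that sync_regs() implements. Userspace edits the register
 * image in the mmap'ed kvm_run structure and flags what changed before
 * the next KVM_RUN; vcpu_fd and run are assumed to be set up elsewhere:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */
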
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

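/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * KVM_RUN loop driving the ioctl above; vcpu_fd and the mmap'ed run
 * structure are assumed to be set up elsewhere, error handling is
 * elided:
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)
 *				continue;	// interrupted by a signal
 *			break;
 *		}
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;			// intercept for userspace
 *	}
 */
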
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

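/*
 * Illustrative userspace sketch (not part of this file): enabling the
 * one vcpu capability handled above; vcpu_fd is assumed to be an open
 * vcpu file descriptor, and flags/args must stay zero:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */
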
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

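/*
 * Illustrative userspace sketch (not part of this file): reading guest
 * memory through the memop handler above; vcpu_fd is assumed to be an
 * open vcpu file descriptor. A return value > 0 is the program
 * interruption code of a failed guest access:
 *
 *	__u8 buf[512];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr	= 0x1000,	// guest logical address
 *		.size	= sizeof(buf),
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buf,
 *		.ar	= 0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */
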
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. We can have memory slots which have to start
	 * and end at a segment boundary (1 MB). The memory in userland may
	 * be fragmented into various different vmas. It is okay to mmap()
	 * and munmap() stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

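/*
 * Illustrative userspace sketch (not part of this file): a memslot
 * that satisfies the checks above. Base address, size and the mmap'ed
 * backing must all be 1 MB (segment) aligned; vm_fd and backing are
 * assumed to be set up elsewhere:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,
 *		.userspace_addr = (__u64)backing,	// from mmap()
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
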
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		/* FALLTHROUGH */
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

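/*
 * Worked example (illustration only): sclp.hmfai carries a 2-bit
 * indication per 64-bit facility word, consumed one word per loop
 * iteration in kvm_s390_init() below. For word i = 0 with the top two
 * bits of hmfai equal to 01:
 *
 *	nonhyp_fai = (hmfai << 0) >> 30          = 1
 *	mask       = 0x0000ffffffffffffUL >> 16  = 0x00000000ffffffffUL
 *
 * so the AND with the host's STFLE word keeps only the facility bits
 * the mask leaves set.
 */
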
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");