// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

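/*
 * These counters are exported read-only via debugfs (typically under
 * /sys/kernel/debug/kvm/) by the common KVM code that walks this table.
 */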
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

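/*
 * Matches the 16-byte value stored by STORE CLOCK EXTENDED (STCKE), as
 * read via get_tod_clock_ext(): an epoch-index byte, the 64-bit TOD
 * value and seven reserved bytes.
 */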
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
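/*
 * Illustration (hypothetical invocation): the parameters above can be set
 * at load time, e.g. "modprobe kvm nested=1 hpage=1", and are visible
 * under /sys/module/kvm/parameters/; only halt_poll_max_steal (mode 0644)
 * is writable at runtime.
 */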
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go
 * beyond this, it will require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines the default mask for facilities. Consists
 * of the defines in FACILITIES_KVM and the non-hypervisor-managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

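/*
 * Apply a TOD clock delta to one SIE control block. The arithmetic below
 * is effectively a 128-bit signed addition: -delta is added to the epoch
 * and, for multiple-epoch capable guests (ECD_MEF), the sign extension
 * plus an eventual unsigned-overflow carry is folded into the epoch index
 * (epdx), so the guest-visible TOD is unchanged by the host clock jump.
 */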
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

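/*
 * PERFORM LOCKED OPERATION's "test bit" form: setting bit 0x100 on top of
 * the function code in r0 queries whether that function code is installed;
 * condition code 0 means it is.
 */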
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

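/*
 * Run the query function (function code 0) of an RRF-format instruction
 * such as SORTL or DFLTCC and store the returned 32-byte indication of
 * the installed subfunctions into "query".
 */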
static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}
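/*
 * A minimal userspace sketch (hypothetical fd names, error handling
 * omitted) for probing one of the extensions reported above:
 *
 *	int ret = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	if (ret > 0)
 *		... memory operations of up to "ret" bytes are supported ...
 */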
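/*
 * Walk the memslot in 1 MB segments (_PAGE_ENTRIES pages of 4k each per
 * segment), collect the per-page dirty bits from the gmap and propagate
 * them into the generic KVM dirty bitmap.
 */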
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To prevent the hardware from
			 * working on stale PGSTEs, we emulate
			 * these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
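/*
 * Illustrative userspace sketch (assumes an open vm_fd) for enabling one
 * of the capabilities handled above:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */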
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

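/*
 * Set the key wrapping and AP interpretation (APIE) crypto attributes.
 * Enabling key wrapping installs freshly generated random wrapping-key
 * masks in the CRYCB; afterwards all vcpus are reset so they pick up
 * the new configuration and rebuild any shadow CRYCBs.
 */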
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
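/*
 * Migration mode is toggled by userspace through the KVM_S390_VM_MIGRATION
 * attribute group, e.g. (illustrative sketch, assumes an open vm_fd):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr = KVM_S390_VM_MIGRATION_START,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */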
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
1148
David Hildenbrand33d1b272018-04-27 14:36:13 +02001149static void kvm_s390_get_tod_clock(struct kvm *kvm,
1150 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001151{
1152 struct kvm_s390_tod_clock_ext htod;
1153
1154 preempt_disable();
1155
1156 get_tod_clock_ext((char *)&htod);
1157
1158 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001159 gtod->epoch_idx = 0;
1160 if (test_kvm_facility(kvm, 139)) {
1161 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1162 if (gtod->tod < htod.tod)
1163 gtod->epoch_idx += 1;
1164 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001165
1166 preempt_enable();
1167}
1168
1169static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1170{
1171 struct kvm_s390_vm_tod_clock gtod;
1172
1173 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001174 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001175 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1176 return -EFAULT;
1177
1178 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1179 gtod.epoch_idx, gtod.tod);
1180 return 0;
1181}
1182
Jason J. Herne72f25022014-11-25 09:46:02 -05001183static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1184{
1185 u8 gtod_high = 0;
1186
1187 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1188 sizeof(gtod_high)))
1189 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001190 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001191
1192 return 0;
1193}
1194
1195static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1196{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001197 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001198
David Hildenbrand60417fc2015-09-29 16:20:36 +02001199 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001200 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1201 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001202 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001203
1204 return 0;
1205}
1206
1207static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1208{
1209 int ret;
1210
1211 if (attr->flags)
1212 return -EINVAL;
1213
1214 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001215 case KVM_S390_VM_TOD_EXT:
1216 ret = kvm_s390_get_tod_ext(kvm, attr);
1217 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001218 case KVM_S390_VM_TOD_HIGH:
1219 ret = kvm_s390_get_tod_high(kvm, attr);
1220 break;
1221 case KVM_S390_VM_TOD_LOW:
1222 ret = kvm_s390_get_tod_low(kvm, attr);
1223 break;
1224 default:
1225 ret = -ENXIO;
1226 break;
1227 }
1228 return ret;
1229}
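
/*
 * Illustrative userspace sketch (not part of this file's build; vm_fd is
 * an assumed open VM fd): the simplest TOD manipulation goes through
 * KVM_S390_VM_TOD_LOW, which transfers only the 64 bit TOD base handled
 * by kvm_s390_set_tod_low()/kvm_s390_get_tod_low() above.
 *
 *	__u64 tod = 0;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&tod,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	tod += 42;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */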

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
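
/*
 * Worked example for the IBC clamping above (the sclp.ibc value is made
 * up): with sclp.ibc = 0x01000120, lowest_ibc is 0x100 and unblocked_ibc
 * is 0x120. A userspace request of proc->ibc = 0x130 is then clamped down
 * to 0x120, a request of 0x090 is raised to 0x100, and 0x110 is taken
 * unchanged.
 */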

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
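
/*
 * Illustrative userspace sketch (not part of this file's build; vm_fd is
 * an assumed open VM fd): kvm_s390_vm_has_attr() backs KVM_HAS_DEVICE_ATTR,
 * so support for an attribute can be probed without side effects before it
 * is used, e.g.:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
 *	};
 *	int supported = (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0);
 */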

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
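
/*
 * Illustrative userspace sketch (not part of this file's build; vm_fd is
 * an assumed open VM fd): reading the storage keys of the first 256 guest
 * pages. A return value of KVM_S390_GET_SKEYS_NONE means the guest does
 * not use storage keys at all.
 *
 *	__u8 keys[256];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = 256,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */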

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
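
/*
 * On a 64 bit host KVM_S390_MAX_BIT_DISTANCE evaluates to 16: since each
 * CMMA value is one byte, a run of up to 16 clean values costs no more to
 * transmit inline than the two u64s (base address and length) that
 * starting a new block would cost.
 */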

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}
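
/*
 * Example of the binary search above, assuming the usual invariant that
 * the memslots array is sorted by base_gfn in descending order: with
 * memslots[0] = [0x200, 0x300) and memslots[1] = [0x0, 0x100), looking up
 * gfn 0x150 (which lies in the hole) converges on index 1, the slot below
 * the hole; callers have to check whether the gfn really falls inside the
 * returned slot.
 */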

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
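
/*
 * Illustrative userspace sketch (not part of this file's build; vm_fd is
 * an assumed open VM fd): peeking at the CMMA values of 512 pages without
 * clearing any dirty bits. With KVM_S390_CMMA_PEEK set, migration mode
 * does not have to be active. On success, log.count holds the number of
 * values actually copied.
 *
 *	__u8 values[512];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count     = 512,
 *		.flags     = KVM_S390_CMMA_PEEK,
 *		.values    = (__u64)values,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 */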

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both formats use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2354
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002355static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002356{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002357 struct cpuid cpuid;
2358
2359 get_cpu_id(&cpuid);
2360 cpuid.version = 0xff;
2361 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002362}
2363
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002364static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002365{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002366 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002367 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002368
Tony Krowiake585b242018-09-25 19:16:18 -04002369 if (!test_kvm_facility(kvm, 76))
2370 return;
2371
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002372 /* Enable AES/DEA protected key functions by default */
2373 kvm->arch.crypto.aes_kw = 1;
2374 kvm->arch.crypto.dea_kw = 1;
2375 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2376 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2377 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2378 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002379}
2380
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002381static void sca_dispose(struct kvm *kvm)
2382{
2383 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002384 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002385 else
2386 free_page((unsigned long)(kvm->arch.sca));
2387 kvm->arch.sca = NULL;
2388}
2389
Carsten Ottee08b9632012-01-04 10:25:20 +01002390int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002391{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002392 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002393 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002394 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002395 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396
Carsten Ottee08b9632012-01-04 10:25:20 +01002397 rc = -EINVAL;
2398#ifdef CONFIG_KVM_S390_UCONTROL
2399 if (type & ~KVM_VM_S390_UCONTROL)
2400 goto out_err;
2401 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2402 goto out_err;
2403#else
2404 if (type)
2405 goto out_err;
2406#endif
2407
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002408 rc = s390_enable_sie();
2409 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002410 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002411
Carsten Otteb2904112011-10-18 12:27:13 +02002412 rc = -ENOMEM;
2413
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002414 if (!sclp.has_64bscao)
2415 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002416 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002417 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002418 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002419 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002420 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002421 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002422 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002423 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002424 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002425 kvm->arch.sca = (struct bsca_block *)
2426 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002427 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002428
2429 sprintf(debug_name, "kvm-%u", current->pid);
2430
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002431 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002432 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002433 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002434
Michael Mueller19114be2017-05-30 14:26:02 +02002435 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002436 kvm->arch.sie_page2 =
2437 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2438 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002439 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002440
Michael Mueller25c84db2019-01-31 09:52:41 +01002441 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002442 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002443
2444 for (i = 0; i < kvm_s390_fac_size(); i++) {
2445 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2446 (kvm_s390_fac_base[i] |
2447 kvm_s390_fac_ext[i]);
2448 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2449 kvm_s390_fac_base[i];
2450 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002451 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002452
David Hildenbrand19352222017-08-29 16:31:08 +02002453 /* we are always in czam mode - even on pre-z14 machines */
2454 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2455 set_kvm_facility(kvm->arch.model.fac_list, 138);
2456 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002457 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2458 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002459 if (MACHINE_HAS_TLB_GUEST) {
2460 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2461 set_kvm_facility(kvm->arch.model.fac_list, 147);
2462 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002463
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002464 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002465 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002466
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002467 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002468
Fei Li51978392017-02-17 17:06:26 +08002469 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002470 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002471 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2472 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002473 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002474 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002475
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002476 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002477 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002478
Carsten Ottee08b9632012-01-04 10:25:20 +01002479 if (type & KVM_VM_S390_UCONTROL) {
2480 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002481 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002482 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002483 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002484 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002485 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002486 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002487 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002488 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002489 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002490 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002491 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002492 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002493 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002494
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002495 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002496 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002497 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002498 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002499 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002500 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002501
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002502 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002503out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002504 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002505 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002506 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002507 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002508 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002509}
2510
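/*
 * Illustrative sketch, not part of the original file: the sca_offset
 * logic above staggers each new VM's basic SCA by 16 bytes inside its
 * page, so the SCAs of many VMs do not all start at the same page
 * offset (and thus the same cache set). Extracted for clarity, under
 * the same sizeof(struct bsca_block) bound used above; callers would
 * hold kvm_lock, as the code above does:
 */
static inline unsigned long sca_next_offset(unsigned long prev_offset,
					    unsigned long sca_size)
{
	unsigned long offset = prev_offset + 16;

	/* wrap around before the SCA would cross the page boundary */
	if (offset + sca_size > PAGE_SIZE)
		offset = 0;
	return offset;
}
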
Luiz Capitulino235539b2016-09-07 14:47:23 -04002511bool kvm_arch_has_vcpu_debugfs(void)
2512{
2513 return false;
2514}
2515
2516int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2517{
2518 return 0;
2519}
2520
Christian Borntraegerd329c032008-11-26 14:50:27 +01002521void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2522{
2523 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002524 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002525 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002526 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002527 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002528 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002529
2530 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002531 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002532
Dominik Dingele6db1d62015-05-07 15:41:57 +02002533 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002534 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002535 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002536
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002537 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002538 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002539}
2540
2541static void kvm_free_vcpus(struct kvm *kvm)
2542{
2543 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002544 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002545
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002546 kvm_for_each_vcpu(i, vcpu, kvm)
2547 kvm_arch_vcpu_destroy(vcpu);
2548
2549 mutex_lock(&kvm->lock);
2550 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2551 kvm->vcpus[i] = NULL;
2552
2553 atomic_set(&kvm->online_vcpus, 0);
2554 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002555}
2556
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002557void kvm_arch_destroy_vm(struct kvm *kvm)
2558{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002559 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002560 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002561 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002562 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002563 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002564 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002565 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002566 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002567 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002568 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002569 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002570}
2571
2572/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002573static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2574{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002575 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002576 if (!vcpu->arch.gmap)
2577 return -ENOMEM;
2578 vcpu->arch.gmap->private = vcpu->kvm;
2579
2580 return 0;
2581}
2582
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002583static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2584{
David Hildenbranda6940672016-08-08 22:39:32 +02002585 if (!kvm_s390_use_sca_entries())
2586 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002587 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002588 if (vcpu->kvm->arch.use_esca) {
2589 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002590
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002591 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002592 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002593 } else {
2594 struct bsca_block *sca = vcpu->kvm->arch.sca;
2595
2596 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002597 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002598 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002599 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002600}
2601
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002602static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002603{
David Hildenbranda6940672016-08-08 22:39:32 +02002604 if (!kvm_s390_use_sca_entries()) {
2605 struct bsca_block *sca = vcpu->kvm->arch.sca;
2606
2607 /* we still need the basic sca for the ipte control */
2608 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2609 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002610 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002611 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002612 read_lock(&vcpu->kvm->arch.sca_lock);
2613 if (vcpu->kvm->arch.use_esca) {
2614 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002615
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002616 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002617 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2618 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002619 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002620 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002621 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002622 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002623
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002624 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002625 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2626 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002627 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002628 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002629 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002630}
2631
2632/* Basic SCA to Extended SCA data copy routines */
2633static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2634{
2635 d->sda = s->sda;
2636 d->sigp_ctrl.c = s->sigp_ctrl.c;
2637 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2638}
2639
2640static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2641{
2642 int i;
2643
2644 d->ipte_control = s->ipte_control;
2645 d->mcn[0] = s->mcn;
2646 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2647 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2648}
2649
2650static int sca_switch_to_extended(struct kvm *kvm)
2651{
2652 struct bsca_block *old_sca = kvm->arch.sca;
2653 struct esca_block *new_sca;
2654 struct kvm_vcpu *vcpu;
2655 unsigned int vcpu_idx;
2656 u32 scaol, scaoh;
2657
2658 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2659 if (!new_sca)
2660 return -ENOMEM;
2661
2662 scaoh = (u32)((u64)(new_sca) >> 32);
2663 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2664
2665 kvm_s390_vcpu_block_all(kvm);
2666 write_lock(&kvm->arch.sca_lock);
2667
2668 sca_copy_b_to_e(new_sca, old_sca);
2669
2670 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2671 vcpu->arch.sie_block->scaoh = scaoh;
2672 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002673 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002674 }
2675 kvm->arch.sca = new_sca;
2676 kvm->arch.use_esca = 1;
2677
2678 write_unlock(&kvm->arch.sca_lock);
2679 kvm_s390_vcpu_unblock_all(kvm);
2680
2681 free_page((unsigned long)old_sca);
2682
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002683 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2684 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002685 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002686}
2687
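/*
 * Illustrative sketch, not part of the original file: the scaoh/scaol
 * pair written into every SIE block above is the 64-bit SCA origin
 * split into two 32-bit halves, with the six low bits of the low half
 * masked off (the block is 64-byte aligned):
 */
static inline void sca_split_origin(unsigned long origin,
				    u32 *scaoh, u32 *scaol)
{
	*scaoh = (u32)(origin >> 32);	/* upper half of the address */
	*scaol = (u32)origin & ~0x3fU;	/* lower half, 64-byte aligned */
}
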
2688static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2689{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002690 int rc;
2691
David Hildenbranda6940672016-08-08 22:39:32 +02002692 if (!kvm_s390_use_sca_entries()) {
2693 if (id < KVM_MAX_VCPUS)
2694 return true;
2695 return false;
2696 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002697 if (id < KVM_S390_BSCA_CPU_SLOTS)
2698 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002699 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002700 return false;
2701
2702 mutex_lock(&kvm->lock);
2703 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2704 mutex_unlock(&kvm->lock);
2705
2706 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002707}
2708
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002709int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2710{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002711 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2712 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002713 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2714 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002715 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002716 KVM_SYNC_CRS |
2717 KVM_SYNC_ARCH0 |
2718 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002719 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002720 if (test_kvm_facility(vcpu->kvm, 64))
2721 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002722 if (test_kvm_facility(vcpu->kvm, 82))
2723 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002724 if (test_kvm_facility(vcpu->kvm, 133))
2725 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002726 if (test_kvm_facility(vcpu->kvm, 156))
2727 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002728 /*
2729 * fprs can be synchronized via vrs, even if the guest has no vx. With
2730 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2731 */
2731 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002732 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002733 else
2734 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002735
2736 if (kvm_is_ucontrol(vcpu->kvm))
2737 return __kvm_ucontrol_vcpu_init(vcpu);
2738
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002739 return 0;
2740}
2741
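/*
 * Illustrative sketch, not part of the original file: the
 * kvm_valid_regs bits set above advertise which register sets are
 * mirrored in the shared kvm_run area, so userspace can read them
 * without extra ioctls, along the lines of (run being the mmap'ed
 * kvm_run of a vcpu fd):
 *
 *	if (run->kvm_valid_regs & KVM_SYNC_GPRS)
 *		gpr2 = run->s.regs.gprs[2];
 */
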
David Hildenbranddb0758b2016-02-15 09:42:25 +01002742/* needs preemption disabled to protect against TOD sync and vcpu_load/put */
2743static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2744{
2745 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002746 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002747 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002748 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002749}
2750
2751/* needs preemption disabled to protect against TOD sync and vcpu_load/put */
2752static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2753{
2754 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002755 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002756 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2757 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002758 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002759}
2760
2761/* needs preemption disabled to protect against TOD sync and vcpu_load/put */
2762static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2763{
2764 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2765 vcpu->arch.cputm_enabled = true;
2766 __start_cpu_timer_accounting(vcpu);
2767}
2768
2769/* needs preemption disabled to protect against TOD sync and vcpu_load/put */
2770static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2771{
2772 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2773 __stop_cpu_timer_accounting(vcpu);
2774 vcpu->arch.cputm_enabled = false;
2775}
2776
2777static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2778{
2779 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2780 __enable_cpu_timer_accounting(vcpu);
2781 preempt_enable();
2782}
2783
2784static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2785{
2786 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2787 __disable_cpu_timer_accounting(vcpu);
2788 preempt_enable();
2789}
2790
David Hildenbrand4287f242016-02-15 09:40:12 +01002791/* set the cpu timer - may only be called from the VCPU thread itself */
2792void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2793{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002794 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002795 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002796 if (vcpu->arch.cputm_enabled)
2797 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002798 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002799 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002800 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002801}
2802
David Hildenbranddb0758b2016-02-15 09:42:25 +01002803/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002804__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2805{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002806 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002807 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002808
2809 if (unlikely(!vcpu->arch.cputm_enabled))
2810 return vcpu->arch.sie_block->cputm;
2811
David Hildenbrand9c23a132016-02-17 21:53:33 +01002812 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2813 do {
2814 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2815 /*
2816 * If the writer ever executed a read in the critical
2817 * section, e.g. in irq context, we would have a deadlock.
2818 */
2819 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2820 value = vcpu->arch.sie_block->cputm;
2821 /* if cputm_start is 0, accounting is being started/stopped */
2822 if (likely(vcpu->arch.cputm_start))
2823 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2824 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2825 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002826 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002827}
2828
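/*
 * Illustrative sketch, not part of the original file: the reader above
 * follows the classic seqcount pattern - sample the sequence counter,
 * read the data, retry if a writer was active in between. A minimal
 * generic form of that loop (names made up for the example):
 */
static inline u64 seqcount_protected_read(seqcount_t *sc, u64 *value)
{
	unsigned int seq;
	u64 v;

	do {
		/* seq is odd while a write is in progress */
		seq = raw_read_seqcount(sc);
		v = READ_ONCE(*value);
	} while (read_seqcount_retry(sc, seq & ~1));
	return v;
}
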
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002829void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2830{
David Hildenbrand37d9df92015-03-11 16:47:33 +01002832 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002833 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002834 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002835 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002836 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002837}
2838
2839void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2840{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002841 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002842 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002843 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002844 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002845 vcpu->arch.enabled_gmap = gmap_get_enabled();
2846 gmap_disable(vcpu->arch.enabled_gmap);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002848}
2849
2850static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2851{
2852 /* this equals the initial cpu reset in POP, but we don't switch to ESA */
2853 vcpu->arch.sie_block->gpsw.mask = 0UL;
2854 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002855 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002856 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002857 vcpu->arch.sie_block->ckc = 0UL;
2858 vcpu->arch.sie_block->todpr = 0;
2859 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002860 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2861 CR0_INTERRUPT_KEY_SUBMASK |
2862 CR0_MEASUREMENT_ALERT_SUBMASK;
2863 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2864 CR14_UNUSED_33 |
2865 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002866 /* make sure the new fpc will be lazily loaded */
2867 save_fpu_regs();
2868 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002869 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002870 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002871 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002872 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2873 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002874 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2875 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002876 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002877}
2878
Dominik Dingel31928aa2014-12-04 15:47:07 +01002879void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002880{
Jason J. Herne72f25022014-11-25 09:46:02 -05002881 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002882 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002883 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002884 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002885 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002886 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002887 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002888 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002889 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002890 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002891 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2892 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002893 /* make vcpu_load load the right gmap on the first trigger */
2894 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002895}
2896
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002897static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2898{
2899 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2900 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2901 return true;
2902 return false;
2903}
2904
2905static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2906{
2907 /* At least one ECC subfunction must be present */
2908 return kvm_has_pckmo_subfunc(kvm, 32) ||
2909 kvm_has_pckmo_subfunc(kvm, 33) ||
2910 kvm_has_pckmo_subfunc(kvm, 34) ||
2911 kvm_has_pckmo_subfunc(kvm, 40) ||
2912 kvm_has_pckmo_subfunc(kvm, 41);
2914}
2915
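/*
 * Illustrative sketch, not part of the original file: the subfunction
 * numbers above (32, 33, ...) are CPACF function codes counted in
 * MSB-0 order, which is why the lookups go through test_bit_inv().
 * Within a single 64-bit word, bit n in MSB-0 numbering is:
 */
static inline int msb0_bit_set(u64 mask, unsigned int n)
{
	return (mask >> (63 - n)) & 1;	/* bit 0 is the most significant */
}
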
Tony Krowiak5102ee82014-06-27 14:46:01 -04002916static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2917{
Tony Krowiake585b242018-09-25 19:16:18 -04002918 /*
2919 * If the AP instructions are not being interpreted and the MSAX3
2920 * facility is not configured for the guest, there is nothing to set up.
2921 */
2922 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002923 return;
2924
Tony Krowiake585b242018-09-25 19:16:18 -04002925 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002926 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002927 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002928 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002929
Tony Krowiake585b242018-09-25 19:16:18 -04002930 if (vcpu->kvm->arch.crypto.apie)
2931 vcpu->arch.sie_block->eca |= ECA_APIE;
2932
2933 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002934 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002935 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002936 /* ECC is also wrapped with the AES key */
2937 if (kvm_has_pckmo_ecc(vcpu->kvm))
2938 vcpu->arch.sie_block->ecd |= ECD_ECC;
2939 }
2940
Tony Krowiaka374e892014-09-03 10:13:53 +02002941 if (vcpu->kvm->arch.crypto.dea_kw)
2942 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002943}
2944
Dominik Dingelb31605c2014-03-25 13:47:11 +01002945void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2946{
2947 free_page(vcpu->arch.sie_block->cbrlo);
2948 vcpu->arch.sie_block->cbrlo = 0;
2949}
2950
2951int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2952{
2953 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2954 if (!vcpu->arch.sie_block->cbrlo)
2955 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002956 return 0;
2957}
2958
Michael Mueller91520f12015-02-27 14:32:11 +01002959static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2960{
2961 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2962
Michael Mueller91520f12015-02-27 14:32:11 +01002963 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002964 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002965 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002966}
2967
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002968int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2969{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002970 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002971
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002972 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2973 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002974 CPUSTAT_STOPPED);
2975
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002976 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002977 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002978 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002979 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002980
Michael Mueller91520f12015-02-27 14:32:11 +01002981 kvm_s390_vcpu_setup_model(vcpu);
2982
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002983 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2984 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002985 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002986 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002987 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002988 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002989 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002990
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002991 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002992 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002993 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002994 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2995 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002996 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002997 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002998 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002999 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003000 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003001 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003002 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003003 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003004 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003005 vcpu->arch.sie_block->eca |= ECA_VX;
3006 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003007 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003008 if (test_kvm_facility(vcpu->kvm, 139))
3009 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003010 if (test_kvm_facility(vcpu->kvm, 156))
3011 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003012 if (vcpu->arch.sie_block->gd) {
3013 vcpu->arch.sie_block->eca |= ECA_AIV;
3014 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3015 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3016 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003017 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3018 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003019 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003020
3021 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003022 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003023 else
3024 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003025
Dominik Dingele6db1d62015-05-07 15:41:57 +02003026 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003027 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3028 if (rc)
3029 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003030 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003031 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003032 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003033
Collin Walling67d49d52018-08-31 12:51:19 -04003034 vcpu->arch.sie_block->hpid = HPID_KVM;
3035
Tony Krowiak5102ee82014-06-27 14:46:01 -04003036 kvm_s390_vcpu_crypto_setup(vcpu);
3037
Dominik Dingelb31605c2014-03-25 13:47:11 +01003038 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003039}
3040
3041struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3042 unsigned int id)
3043{
Carsten Otte4d475552011-10-18 12:27:12 +02003044 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003045 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02003046 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003047
David Hildenbrand42158252015-10-12 12:57:22 +02003048 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02003049 goto out;
3050
3051 rc = -ENOMEM;
3052
Michael Muellerb110fea2013-06-12 13:54:54 +02003053 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003054 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02003055 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003056
QingFeng Haoda72ca42017-06-07 11:41:19 +02003057 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003058 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3059 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003060 goto out_free_cpu;
3061
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003062 vcpu->arch.sie_block = &sie_page->sie_block;
3063 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3064
David Hildenbrandefed1102015-04-16 12:32:41 +02003065 /* the real guest size will always be smaller than msl */
3066 vcpu->arch.sie_block->mso = 0;
3067 vcpu->arch.sie_block->msl = sclp.hamax;
3068
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003069 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003070 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Mueller982cff42019-01-31 09:52:38 +01003071 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003072 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3073 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003074 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003075
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003076 rc = kvm_vcpu_init(vcpu, kvm, id);
3077 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003078 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01003079 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003080 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02003081 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003082
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003083 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003084out_free_sie_block:
3085 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003086out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02003087 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02003088out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003089 return ERR_PTR(rc);
3090}
3091
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003092int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3093{
David Hildenbrand9a022062014-08-05 17:40:47 +02003094 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003095}
3096
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003097bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3098{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003099 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003100}
3101
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003102void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003103{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003104 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003105 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003106}
3107
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003108void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003109{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003110 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003111}
3112
Christian Borntraeger8e236542015-04-09 13:49:04 +02003113static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3114{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003115 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003116 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003117}
3118
David Hildenbrand9ea59722018-09-25 19:16:16 -04003119bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3120{
3121 return atomic_read(&vcpu->arch.sie_block->prog20) &
3122 (PROG_BLOCK_SIE | PROG_REQUEST);
3123}
3124
Christian Borntraeger8e236542015-04-09 13:49:04 +02003125static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3126{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003127 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003128}
3129
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003130/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003131 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003132 * If the CPU is not running (e.g. waiting as idle) the function will
3133 * return immediately.
 */
3134void exit_sie(struct kvm_vcpu *vcpu)
3135{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003136 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003137 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003138 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3139 cpu_relax();
3140}
3141
Christian Borntraeger8e236542015-04-09 13:49:04 +02003142/* Kick a guest cpu out of SIE to process a request synchronously */
3143void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003144{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003145 kvm_make_request(req, vcpu);
3146 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003147}
3148
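/*
 * Illustrative sketch, not part of the original file: the order in
 * kvm_s390_sync_request() matters - the request bit is set first and
 * the vcpu is kicked out of SIE second, so the request loop is
 * guaranteed to see the bit before the guest is re-entered. A typical
 * caller (using a request actually handled in this file) looks like:
 *
 *	kvm_s390_sync_request(KVM_REQ_TLB_FLUSH, vcpu);
 */
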
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003149static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3150 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003151{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003152 struct kvm *kvm = gmap->private;
3153 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003154 unsigned long prefix;
3155 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003156
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003157 if (gmap_is_shadow(gmap))
3158 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003159 if (start >= 1UL << 31)
3160 /* We are only interested in prefix pages */
3161 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003162 kvm_for_each_vcpu(i, vcpu, kvm) {
3163 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003164 prefix = kvm_s390_get_prefix(vcpu);
3165 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3166 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3167 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003168 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003169 }
3170 }
3171}
3172
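/*
 * Illustrative sketch, not part of the original file: the prefix test
 * above is a standard closed-interval overlap check between the
 * notifier range [start, end] and the two prefix pages
 * [prefix, prefix + 2 * PAGE_SIZE - 1]:
 */
static inline bool ranges_overlap(unsigned long a_start, unsigned long a_end,
				  unsigned long b_start, unsigned long b_end)
{
	return a_start <= b_end && b_start <= a_end;
}
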
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003173bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3174{
3175 /* do not poll with more than halt_poll_max_steal percent of steal time */
3176 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3177 halt_poll_max_steal) {
3178 vcpu->stat.halt_no_poll_steal++;
3179 return true;
3180 }
3181 return false;
3182}
3183
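/*
 * Illustrative sketch, not part of the original file: the s390 CPU
 * timer advances 4096 (1 << 12) units per microsecond, so one timer
 * tick expressed in those units is TICK_USEC << 12. The heuristic
 * above therefore compares the average steal time, as a percentage of
 * one tick, against halt_poll_max_steal:
 */
static inline unsigned long steal_percent_of_tick(u64 avg_steal_timer)
{
	return avg_steal_timer * 100 / (TICK_USEC << 12);
}
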
Christoffer Dallb6d33832012-03-08 16:44:24 -05003184int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3185{
3186 /* kvm common code refers to this, but never calls it */
3187 BUG();
3188 return 0;
3189}
3190
Carsten Otte14eebd92012-05-15 14:15:26 +02003191static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3192 struct kvm_one_reg *reg)
3193{
3194 int r = -EINVAL;
3195
3196 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003197 case KVM_REG_S390_TODPR:
3198 r = put_user(vcpu->arch.sie_block->todpr,
3199 (u32 __user *)reg->addr);
3200 break;
3201 case KVM_REG_S390_EPOCHDIFF:
3202 r = put_user(vcpu->arch.sie_block->epoch,
3203 (u64 __user *)reg->addr);
3204 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003205 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003206 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003207 (u64 __user *)reg->addr);
3208 break;
3209 case KVM_REG_S390_CLOCK_COMP:
3210 r = put_user(vcpu->arch.sie_block->ckc,
3211 (u64 __user *)reg->addr);
3212 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003213 case KVM_REG_S390_PFTOKEN:
3214 r = put_user(vcpu->arch.pfault_token,
3215 (u64 __user *)reg->addr);
3216 break;
3217 case KVM_REG_S390_PFCOMPARE:
3218 r = put_user(vcpu->arch.pfault_compare,
3219 (u64 __user *)reg->addr);
3220 break;
3221 case KVM_REG_S390_PFSELECT:
3222 r = put_user(vcpu->arch.pfault_select,
3223 (u64 __user *)reg->addr);
3224 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003225 case KVM_REG_S390_PP:
3226 r = put_user(vcpu->arch.sie_block->pp,
3227 (u64 __user *)reg->addr);
3228 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003229 case KVM_REG_S390_GBEA:
3230 r = put_user(vcpu->arch.sie_block->gbea,
3231 (u64 __user *)reg->addr);
3232 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003233 default:
3234 break;
3235 }
3236
3237 return r;
3238}
3239
3240static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3241 struct kvm_one_reg *reg)
3242{
3243 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003244 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003245
3246 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003247 case KVM_REG_S390_TODPR:
3248 r = get_user(vcpu->arch.sie_block->todpr,
3249 (u32 __user *)reg->addr);
3250 break;
3251 case KVM_REG_S390_EPOCHDIFF:
3252 r = get_user(vcpu->arch.sie_block->epoch,
3253 (u64 __user *)reg->addr);
3254 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003255 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003256 r = get_user(val, (u64 __user *)reg->addr);
3257 if (!r)
3258 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003259 break;
3260 case KVM_REG_S390_CLOCK_COMP:
3261 r = get_user(vcpu->arch.sie_block->ckc,
3262 (u64 __user *)reg->addr);
3263 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003264 case KVM_REG_S390_PFTOKEN:
3265 r = get_user(vcpu->arch.pfault_token,
3266 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003267 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3268 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003269 break;
3270 case KVM_REG_S390_PFCOMPARE:
3271 r = get_user(vcpu->arch.pfault_compare,
3272 (u64 __user *)reg->addr);
3273 break;
3274 case KVM_REG_S390_PFSELECT:
3275 r = get_user(vcpu->arch.pfault_select,
3276 (u64 __user *)reg->addr);
3277 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003278 case KVM_REG_S390_PP:
3279 r = get_user(vcpu->arch.sie_block->pp,
3280 (u64 __user *)reg->addr);
3281 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003282 case KVM_REG_S390_GBEA:
3283 r = get_user(vcpu->arch.sie_block->gbea,
3284 (u64 __user *)reg->addr);
3285 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003286 default:
3287 break;
3288 }
3289
3290 return r;
3291}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003292
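/*
 * Illustrative sketch, not part of the original file: both handlers
 * above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls,
 * where struct kvm_one_reg carries the register id and a user pointer
 * to the value buffer. From userspace (vcpu_fd assumed to be an open
 * vcpu file descriptor):
 *
 *	__u32 todpr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_TODPR,
 *		.addr = (__u64)&todpr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
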
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003293static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3294{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003295 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003296 return 0;
3297}
3298
3299int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3300{
Christoffer Dall875656f2017-12-04 21:35:27 +01003301 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003302 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003303 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003304 return 0;
3305}
3306
3307int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3308{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003309 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003310 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003311 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003312 return 0;
3313}
3314
3315int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3316 struct kvm_sregs *sregs)
3317{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003318 vcpu_load(vcpu);
3319
Christian Borntraeger59674c12012-01-11 11:20:33 +01003320 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003321 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003322
3323 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003324 return 0;
3325}
3326
3327int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3328 struct kvm_sregs *sregs)
3329{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003330 vcpu_load(vcpu);
3331
Christian Borntraeger59674c12012-01-11 11:20:33 +01003332 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003333 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003334
3335 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003336 return 0;
3337}
3338
3339int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3340{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003341 int ret = 0;
3342
3343 vcpu_load(vcpu);
3344
3345 if (test_fp_ctl(fpu->fpc)) {
3346 ret = -EINVAL;
3347 goto out;
3348 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003349 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003350 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003351 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3352 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003353 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003354 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003355
3356out:
3357 vcpu_put(vcpu);
3358 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003359}
3360
3361int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3362{
Christoffer Dall13931232017-12-04 21:35:34 +01003363 vcpu_load(vcpu);
3364
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003365 /* make sure we have the latest values */
3366 save_fpu_regs();
3367 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003368 convert_vx_to_fp((freg_t *) fpu->fprs,
3369 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003370 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003371 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003372 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003373
3374 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003375 return 0;
3376}
3377
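/*
 * Illustrative sketch, not part of the original file: with the vector
 * facility, floating point registers 0-15 overlay the leftmost 64 bits
 * of vector registers 0-15, which is what convert_fp_to_vx() and
 * convert_vx_to_fp() rely on. Per register, on big-endian s390 this
 * amounts to:
 */
static inline void fpr_into_vr(u64 fpr, __vector128 *vr)
{
	/* the fpr value becomes the leftmost 8 bytes of the vr */
	memcpy(vr, &fpr, sizeof(fpr));
}
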
3378static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3379{
3380 int rc = 0;
3381
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003382 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003383 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003384 else {
3385 vcpu->run->psw_mask = psw.mask;
3386 vcpu->run->psw_addr = psw.addr;
3387 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003388 return rc;
3389}
3390
3391int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3392 struct kvm_translation *tr)
3393{
3394 return -EINVAL; /* not implemented yet */
3395}
3396
David Hildenbrand27291e22014-01-23 12:26:52 +01003397#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3398 KVM_GUESTDBG_USE_HW_BP | \
3399 KVM_GUESTDBG_ENABLE)
3400
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003401int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3402 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003403{
David Hildenbrand27291e22014-01-23 12:26:52 +01003404 int rc = 0;
3405
Christoffer Dall66b56562017-12-04 21:35:33 +01003406 vcpu_load(vcpu);
3407
David Hildenbrand27291e22014-01-23 12:26:52 +01003408 vcpu->guest_debug = 0;
3409 kvm_s390_clear_bp_data(vcpu);
3410
Christoffer Dall66b56562017-12-04 21:35:33 +01003411 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3412 rc = -EINVAL;
3413 goto out;
3414 }
3415 if (!sclp.has_gpere) {
3416 rc = -EINVAL;
3417 goto out;
3418 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003419
3420 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3421 vcpu->guest_debug = dbg->control;
3422 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003423 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003424
3425 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3426 rc = kvm_s390_import_bp_data(vcpu, dbg);
3427 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003428 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003429 vcpu->arch.guestdbg.last_bp = 0;
3430 }
3431
3432 if (rc) {
3433 vcpu->guest_debug = 0;
3434 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003435 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003436 }
3437
Christoffer Dall66b56562017-12-04 21:35:33 +01003438out:
3439 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003440 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003441}
3442
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003443int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3444 struct kvm_mp_state *mp_state)
3445{
Christoffer Dallfd232562017-12-04 21:35:30 +01003446 int ret;
3447
3448 vcpu_load(vcpu);
3449
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003450 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003451 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3452 KVM_MP_STATE_OPERATING;
3453
3454 vcpu_put(vcpu);
3455 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003456}
3457
3458int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3459 struct kvm_mp_state *mp_state)
3460{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003461 int rc = 0;
3462
Christoffer Dalle83dff52017-12-04 21:35:31 +01003463 vcpu_load(vcpu);
3464
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003465 /* user space knows about this interface - let it control the state */
3466 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3467
3468 switch (mp_state->mp_state) {
3469 case KVM_MP_STATE_STOPPED:
3470 kvm_s390_vcpu_stop(vcpu);
3471 break;
3472 case KVM_MP_STATE_OPERATING:
3473 kvm_s390_vcpu_start(vcpu);
3474 break;
3475 case KVM_MP_STATE_LOAD:
3476 case KVM_MP_STATE_CHECK_STOP:
3477 /* fall through - CHECK_STOP and LOAD are not supported yet */
3478 default:
3479 rc = -ENXIO;
3480 }
3481
Christoffer Dalle83dff52017-12-04 21:35:31 +01003482 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003483 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003484}
3485
David Hildenbrand8ad35752014-03-14 11:00:21 +01003486static bool ibs_enabled(struct kvm_vcpu *vcpu)
3487{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003488 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003489}
3490
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003491static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3492{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003493retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003494 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003495 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003496 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003497 /*
3498 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003499 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003500 * This ensures that the ipte instruction for this request has
3501 * already finished. We might race against a second unmapper that
3502 * wants to set the blocking bit. Let's just retry the request loop.
3503 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003504 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003505 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003506 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3507 kvm_s390_get_prefix(vcpu),
3508 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003509 if (rc) {
3510 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003511 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003512 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003513 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003514 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003515
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003516 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3517 vcpu->arch.sie_block->ihcpu = 0xffff;
3518 goto retry;
3519 }
3520
David Hildenbrand8ad35752014-03-14 11:00:21 +01003521 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3522 if (!ibs_enabled(vcpu)) {
3523 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003524 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003525 }
3526 goto retry;
3527 }
3528
3529 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3530 if (ibs_enabled(vcpu)) {
3531 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003532 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003533 }
3534 goto retry;
3535 }
3536
David Hildenbrand6502a342016-06-21 14:19:51 +02003537 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3538 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3539 goto retry;
3540 }
3541
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003542 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3543 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003544 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003545 * instruction manually, in order to provide additional
3546 * functionalities needed for live migration.
3547 */
3548 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3549 goto retry;
3550 }
3551
3552 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3553 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003554 * Re-enable CMM virtualization if CMMA is available and
3555 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003556 */
3557 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003558 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003559 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3560 goto retry;
3561 }
3562
David Hildenbrand0759d062014-05-13 16:54:32 +02003563 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003564 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003565 /* we left the vsie handler, nothing to do, just clear the request */
3566 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003567
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003568 return 0;
3569}
3570
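/*
 * Set the guest TOD clock. kvm->arch.epoch is the offset between the
 * guest and the host TOD; when facility 139 is available, an epoch
 * index difference (epdx) is maintained as well, with a borrow when
 * the 64-bit epoch subtraction wraps. All VCPUs are blocked while the
 * new values are copied into their SIE control blocks.
 */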
David Hildenbrand0e7def52018-02-07 12:46:43 +01003571void kvm_s390_set_tod_clock(struct kvm *kvm,
3572 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003573{
3574 struct kvm_vcpu *vcpu;
3575 struct kvm_s390_tod_clock_ext htod;
3576 int i;
3577
3578 mutex_lock(&kvm->lock);
3579 preempt_disable();
3580
3581 get_tod_clock_ext((char *)&htod);
3582
3583 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003584 kvm->arch.epdx = 0;
3585 if (test_kvm_facility(kvm, 139)) {
3586 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3587 if (kvm->arch.epoch > gtod->tod)
3588 kvm->arch.epdx -= 1;
3589 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003590
3591 kvm_s390_vcpu_block_all(kvm);
3592 kvm_for_each_vcpu(i, vcpu, kvm) {
3593 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3594 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3595 }
3596
3597 kvm_s390_vcpu_unblock_all(kvm);
3598 preempt_enable();
3599 mutex_unlock(&kvm->lock);
3600}
3601
Thomas Huthfa576c52014-05-06 17:20:16 +02003602/**
3603 * kvm_arch_fault_in_page - fault-in guest page if necessary
3604 * @vcpu: The corresponding virtual cpu
3605 * @gpa: Guest physical address
3606 * @writable: Whether the page should be writable or not
3607 *
3608 * Make sure that a guest page has been faulted-in on the host.
3609 *
3610 * Return: Zero on success, negative error code otherwise.
3611 */
3612long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003613{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003614 return gmap_fault(vcpu->arch.gmap, gpa,
3615 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003616}
3617
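/*
 * Inject the token for the pfault protocol: a PFAULT_INIT interrupt
 * when the fault is first reported to the guest, and a PFAULT_DONE
 * interrupt once the page has become available.
 */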
Dominik Dingel3c038e62013-10-07 17:11:48 +02003618static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3619 unsigned long token)
3620{
3621 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003622 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003623
3624 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003625 irq.u.ext.ext_params2 = token;
3626 irq.type = KVM_S390_INT_PFAULT_INIT;
3627 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003628 } else {
3629 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003630 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003631 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3632 }
3633}
3634
3635void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3636 struct kvm_async_pf *work)
3637{
3638 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3639 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3640}
3641
3642void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3643 struct kvm_async_pf *work)
3644{
3645 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3646 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3647}
3648
3649void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3650 struct kvm_async_pf *work)
3651{
3652 /* s390 will always inject the page directly */
3653}
3654
3655bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3656{
3657 /*
3658 * s390 will always inject the page directly,
3659	 * but we still want check_async_completion to clean up
3660 */
3661 return true;
3662}
3663
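/*
 * Set up an async page fault for the address that just faulted in the
 * gmap. This is only done if the guest has enabled the pfault protocol
 * (valid token, matching PSW mask/compare values) and can currently
 * take the completion interrupt (external interrupts and the
 * service-signal subclass enabled, no interrupt already pending).
 */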
3664static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3665{
3666 hva_t hva;
3667 struct kvm_arch_async_pf arch;
3668 int rc;
3669
3670 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3671 return 0;
3672 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3673 vcpu->arch.pfault_compare)
3674 return 0;
3675 if (psw_extint_disabled(vcpu))
3676 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003677 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003678 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003679 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003680 return 0;
3681 if (!vcpu->arch.gmap->pfault_enabled)
3682 return 0;
3683
Heiko Carstens81480cc2014-01-01 16:36:07 +01003684 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3685 hva += current->thread.gmap_addr & ~PAGE_MASK;
3686 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003687 return 0;
3688
3689 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3690 return rc;
3691}
3692
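/*
 * Per-round preparation before entering SIE: finish pfault
 * housekeeping, deliver pending interrupts, process VCPU requests
 * and, with guest debugging active, back up and patch the guest
 * PER registers.
 */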
Thomas Huth3fb4c402013-09-12 10:33:43 +02003693static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003694{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003695 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003696
Dominik Dingel3c038e62013-10-07 17:11:48 +02003697 /*
3698 * On s390 notifications for arriving pages will be delivered directly
3699	 * to the guest, but the housekeeping for completed pfaults is
3700 * handled outside the worker.
3701 */
3702 kvm_check_async_pf_completion(vcpu);
3703
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003704 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3705 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003706
3707 if (need_resched())
3708 schedule();
3709
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003710 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003711 s390_handle_mcck();
3712
Jens Freimann79395032014-04-17 10:10:30 +02003713 if (!kvm_is_ucontrol(vcpu->kvm)) {
3714 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3715 if (rc)
3716 return rc;
3717 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003718
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003719 rc = kvm_s390_handle_requests(vcpu);
3720 if (rc)
3721 return rc;
3722
David Hildenbrand27291e22014-01-23 12:26:52 +01003723 if (guestdbg_enabled(vcpu)) {
3724 kvm_s390_backup_guest_per_regs(vcpu);
3725 kvm_s390_patch_guest_per_regs(vcpu);
3726 }
3727
Michael Mueller9f30f622019-01-31 09:52:44 +01003728 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3729
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003730 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003731 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3732 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3733 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003734
Thomas Huth3fb4c402013-09-12 10:33:43 +02003735 return 0;
3736}
3737
Thomas Huth492d8642015-02-10 16:11:01 +01003738static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3739{
David Hildenbrand56317922016-01-12 17:37:58 +01003740 struct kvm_s390_pgm_info pgm_info = {
3741 .code = PGM_ADDRESSING,
3742 };
3743 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003744 int rc;
3745
3746 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3747 trace_kvm_s390_sie_fault(vcpu);
3748
3749 /*
3750 * We want to inject an addressing exception, which is defined as a
3751 * suppressing or terminating exception. However, since we came here
3752 * by a DAT access exception, the PSW still points to the faulting
3753 * instruction since DAT exceptions are nullifying. So we've got
3754 * to look up the current opcode to get the length of the instruction
3755 * to be able to forward the PSW.
3756 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003757 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003758 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003759 if (rc < 0) {
3760 return rc;
3761 } else if (rc) {
3762 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3763 * Forward by arbitrary ilc, injection will take care of
3764 * nullification if necessary.
3765 */
3766 pgm_info = vcpu->arch.pgm;
3767 ilen = 4;
3768 }
David Hildenbrand56317922016-01-12 17:37:58 +01003769 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3770 kvm_s390_forward_psw(vcpu, ilen);
3771 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003772}
3773
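/*
 * Post-run processing of the SIE exit: -EINTR indicates a host
 * machine check that gets reinjected into the guest, a non-zero
 * icptcode is dispatched to the intercept handlers, and -EFAULT
 * denotes a gmap fault that is resolved via the async pfault
 * mechanism or by synchronously faulting the page in.
 */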
Thomas Huth3fb4c402013-09-12 10:33:43 +02003774static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3775{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003776 struct mcck_volatile_info *mcck_info;
3777 struct sie_page *sie_page;
3778
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003779 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3780 vcpu->arch.sie_block->icptcode);
3781 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3782
David Hildenbrand27291e22014-01-23 12:26:52 +01003783 if (guestdbg_enabled(vcpu))
3784 kvm_s390_restore_guest_per_regs(vcpu);
3785
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003786 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3787 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003788
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003789 if (exit_reason == -EINTR) {
3790 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3791 sie_page = container_of(vcpu->arch.sie_block,
3792 struct sie_page, sie_block);
3793 mcck_info = &sie_page->mcck_info;
3794 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3795 return 0;
3796 }
3797
David Hildenbrand71f116b2015-10-19 16:24:28 +02003798 if (vcpu->arch.sie_block->icptcode > 0) {
3799 int rc = kvm_handle_sie_intercept(vcpu);
3800
3801 if (rc != -EOPNOTSUPP)
3802 return rc;
3803 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3804 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3805 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3806 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3807 return -EREMOTE;
3808 } else if (exit_reason != -EFAULT) {
3809 vcpu->stat.exit_null++;
3810 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003811 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3812 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3813 vcpu->run->s390_ucontrol.trans_exc_code =
3814 current->thread.gmap_addr;
3815 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003816 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003817 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003818 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003819 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003820 if (kvm_arch_setup_async_pf(vcpu))
3821 return 0;
3822 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003823 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003824 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003825}
3826
3827static int __vcpu_run(struct kvm_vcpu *vcpu)
3828{
3829 int rc, exit_reason;
3830
Thomas Huth800c1062013-09-12 10:33:45 +02003831 /*
3832 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3833 * ning the guest), so that memslots (and other stuff) are protected
3834 */
3835 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3836
Thomas Hutha76ccff2013-09-12 10:33:44 +02003837 do {
3838 rc = vcpu_pre_run(vcpu);
3839 if (rc)
3840 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003841
Thomas Huth800c1062013-09-12 10:33:45 +02003842 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003843 /*
3844		 * As PF_VCPU will be used in the fault handler, there must be
3845		 * no uaccess between guest_enter and guest_exit.
3846 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003847 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003848 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003849 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003850 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003851 exit_reason = sie64a(vcpu->arch.sie_block,
3852 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003853 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003854 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003855 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003856 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003857 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003858
Thomas Hutha76ccff2013-09-12 10:33:44 +02003859 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003860 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003861
Thomas Huth800c1062013-09-12 10:33:45 +02003862 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003863 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003864}
3865
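/*
 * Copy the register state that userspace marked dirty in kvm_run into
 * the SIE control block and switch from the host to the guest
 * FP/vector, access and guarded-storage register context; store_regs()
 * below performs the reverse operation.
 */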
David Hildenbrandb028ee32014-07-17 10:47:43 +02003866static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3867{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003868 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003869 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003870
3871 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003872 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003873 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3874 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3875 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3876 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3877 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3878 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003879 /* some control register changes require a tlb flush */
3880 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003881 }
3882 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003883 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003884 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3885 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3886 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3887 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3888 }
3889 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3890 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3891 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3892 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003893 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3894 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003895 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003896 /*
3897 * If userspace sets the riccb (e.g. after migration) to a valid state,
3898 * we should enable RI here instead of doing the lazy enablement.
3899 */
3900 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003901 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003902 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003903 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003904 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003905 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003906 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003907 /*
3908 * If userspace sets the gscb (e.g. after migration) to non-zero,
3909 * we should enable GS here instead of doing the lazy enablement.
3910 */
3911 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3912 test_kvm_facility(vcpu->kvm, 133) &&
3913 gscb->gssm &&
3914 !vcpu->arch.gs_enabled) {
3915 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3916 vcpu->arch.sie_block->ecb |= ECB_GS;
3917 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3918 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003919 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003920 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3921 test_kvm_facility(vcpu->kvm, 82)) {
3922 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3923 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3924 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003925 save_access_regs(vcpu->arch.host_acrs);
3926 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003927 /* save host (userspace) fprs/vrs */
3928 save_fpu_regs();
3929 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3930 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3931 if (MACHINE_HAS_VX)
3932 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3933 else
3934 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3935 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3936 if (test_fp_ctl(current->thread.fpu.fpc))
3937 /* User space provided an invalid FPC, let's clear it */
3938 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003939 if (MACHINE_HAS_GS) {
3940 preempt_disable();
3941 __ctl_set_bit(2, 4);
3942 if (current->thread.gs_cb) {
3943 vcpu->arch.host_gscb = current->thread.gs_cb;
3944 save_gs_cb(vcpu->arch.host_gscb);
3945 }
3946 if (vcpu->arch.gs_enabled) {
3947 current->thread.gs_cb = (struct gs_cb *)
3948 &vcpu->run->s.regs.gscb;
3949 restore_gs_cb(current->thread.gs_cb);
3950 }
3951 preempt_enable();
3952 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003953 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003954
David Hildenbrandb028ee32014-07-17 10:47:43 +02003955 kvm_run->kvm_dirty_regs = 0;
3956}
3957
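/*
 * Counterpart of sync_regs(): write the guest register state back
 * into kvm_run and restore the host context saved in sync_regs().
 */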
3958static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3959{
3960 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3961 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3962 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3963 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003964 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003965 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3966 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3967 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3968 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3969 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3970 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3971 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003972 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003973 save_access_regs(vcpu->run->s.regs.acrs);
3974 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003975 /* Save guest register state */
3976 save_fpu_regs();
3977 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3978 /* Restore will be done lazily at return */
3979 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3980 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003981 if (MACHINE_HAS_GS) {
3982 __ctl_set_bit(2, 4);
3983 if (vcpu->arch.gs_enabled)
3984 save_gs_cb(current->thread.gs_cb);
3985 preempt_disable();
3986 current->thread.gs_cb = vcpu->arch.host_gscb;
3987 restore_gs_cb(vcpu->arch.host_gscb);
3988 preempt_enable();
3989 if (!vcpu->arch.host_gscb)
3990 __ctl_clear_bit(2, 4);
3991 vcpu->arch.host_gscb = NULL;
3992 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003993 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003994}
3995
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003996int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3997{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003998 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003999
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004000 if (kvm_run->immediate_exit)
4001 return -EINTR;
4002
Christoffer Dallaccb7572017-12-04 21:35:25 +01004003 vcpu_load(vcpu);
4004
David Hildenbrand27291e22014-01-23 12:26:52 +01004005 if (guestdbg_exit_pending(vcpu)) {
4006 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004007 rc = 0;
4008 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004009 }
4010
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004011 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004012
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004013 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4014 kvm_s390_vcpu_start(vcpu);
4015 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004016 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004017 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004018 rc = -EINVAL;
4019 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004020 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004021
David Hildenbrandb028ee32014-07-17 10:47:43 +02004022 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004023 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004024
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004025 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004026 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004027
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004028 if (signal_pending(current) && !rc) {
4029 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004030 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004031 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004032
David Hildenbrand27291e22014-01-23 12:26:52 +01004033 if (guestdbg_exit_pending(vcpu) && !rc) {
4034 kvm_s390_prepare_debug_exit(vcpu);
4035 rc = 0;
4036 }
4037
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004038 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004039 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004040 rc = 0;
4041 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004042
David Hildenbranddb0758b2016-02-15 09:42:25 +01004043 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004044 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004045
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004046 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004047
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004048 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004049out:
4050 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004051 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004052}
4053
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004054/*
4055 * store status at address
4056 * we have two special cases:
4057 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4058 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4059 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004060int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004061{
Carsten Otte092670c2011-07-24 10:48:22 +02004062 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004063 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004064 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004065 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004066 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004067
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004068 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004069 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4070 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004071 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004072 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004073 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4074 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004075 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004076 gpa = px;
4077 } else
4078 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004079
4080 /* manually convert vector registers if necessary */
4081 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004082 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004083 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4084 fprs, 128);
4085 } else {
4086 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004087 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004088 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004089 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004090 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004091 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004092 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004093 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004094 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004095 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004096 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004097 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004098 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004099 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004100 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004101 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004102 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004103 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004104 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004105 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004106 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004107 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004108 &vcpu->arch.sie_block->gcr, 128);
4109 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004110}
4111
Thomas Huthe8798922013-11-06 15:46:33 +01004112int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4113{
4114 /*
4115 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004116 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004117	 * them into the save area.
4118 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004119 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004120 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004121 save_access_regs(vcpu->run->s.regs.acrs);
4122
4123 return kvm_s390_store_status_unloaded(vcpu, addr);
4124}
4125
David Hildenbrand8ad35752014-03-14 11:00:21 +01004126static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4127{
4128 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004129 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004130}
4131
4132static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4133{
4134 unsigned int i;
4135 struct kvm_vcpu *vcpu;
4136
4137 kvm_for_each_vcpu(i, vcpu, kvm) {
4138 __disable_ibs_on_vcpu(vcpu);
4139 }
4140}
4141
4142static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4143{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004144 if (!sclp.has_ibs)
4145 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004146 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004147 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004148}
4149
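/*
 * The start/stop code tracks how many VCPUs are operating so that IBS
 * is only enabled while exactly one VCPU is started: it speeds up a
 * single running CPU, but has to be withdrawn as soon as a second
 * VCPU comes online.
 */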
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004150void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4151{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004152 int i, online_vcpus, started_vcpus = 0;
4153
4154 if (!is_vcpu_stopped(vcpu))
4155 return;
4156
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004157 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004158 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004159 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004160 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4161
4162 for (i = 0; i < online_vcpus; i++) {
4163 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4164 started_vcpus++;
4165 }
4166
4167 if (started_vcpus == 0) {
4168 /* we're the only active VCPU -> speed it up */
4169 __enable_ibs_on_vcpu(vcpu);
4170 } else if (started_vcpus == 1) {
4171 /*
4172 * As we are starting a second VCPU, we have to disable
4173 * the IBS facility on all VCPUs to remove potentially
4174		 * outstanding ENABLE requests.
4175 */
4176 __disable_ibs_on_all_vcpus(vcpu->kvm);
4177 }
4178
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004179 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004180 /*
4181 * Another VCPU might have used IBS while we were offline.
4182 * Let's play safe and flush the VCPU at startup.
4183 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004184 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004185 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004186 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004187}
4188
4189void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4190{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004191 int i, online_vcpus, started_vcpus = 0;
4192 struct kvm_vcpu *started_vcpu = NULL;
4193
4194 if (is_vcpu_stopped(vcpu))
4195 return;
4196
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004197 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004198 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004199 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004200 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4201
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004202 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004203 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004204
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004205 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004206 __disable_ibs_on_vcpu(vcpu);
4207
4208 for (i = 0; i < online_vcpus; i++) {
4209 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4210 started_vcpus++;
4211 started_vcpu = vcpu->kvm->vcpus[i];
4212 }
4213 }
4214
4215 if (started_vcpus == 1) {
4216 /*
4217 * As we only have one VCPU left, we want to enable the
4218 * IBS facility for that VCPU to speed it up.
4219 */
4220 __enable_ibs_on_vcpu(started_vcpu);
4221 }
4222
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004223 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004224 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004225}
4226
Cornelia Huckd6712df2012-12-20 15:32:11 +01004227static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4228 struct kvm_enable_cap *cap)
4229{
4230 int r;
4231
4232 if (cap->flags)
4233 return -EINVAL;
4234
4235 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004236 case KVM_CAP_S390_CSS_SUPPORT:
4237 if (!vcpu->kvm->arch.css_support) {
4238 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004239 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004240 trace_kvm_s390_enable_css(vcpu->kvm);
4241 }
4242 r = 0;
4243 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004244 default:
4245 r = -EINVAL;
4246 break;
4247 }
4248 return r;
4249}
4250
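/*
 * Read or write guest memory on behalf of userspace (KVM_S390_MEM_OP).
 * With KVM_S390_MEMOP_F_CHECK_ONLY only the access check is performed;
 * a positive return value denotes a program exception, which is
 * injected into the guest if the caller requested that.
 */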
Thomas Huth41408c282015-02-06 15:01:21 +01004251static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4252 struct kvm_s390_mem_op *mop)
4253{
4254 void __user *uaddr = (void __user *)mop->buf;
4255 void *tmpbuf = NULL;
4256 int r, srcu_idx;
4257 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4258 | KVM_S390_MEMOP_F_CHECK_ONLY;
4259
4260 if (mop->flags & ~supported_flags)
4261 return -EINVAL;
4262
4263 if (mop->size > MEM_OP_MAX_SIZE)
4264 return -E2BIG;
4265
4266 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4267 tmpbuf = vmalloc(mop->size);
4268 if (!tmpbuf)
4269 return -ENOMEM;
4270 }
4271
4272 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4273
4274 switch (mop->op) {
4275 case KVM_S390_MEMOP_LOGICAL_READ:
4276 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004277 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4278 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004279 break;
4280 }
4281 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4282 if (r == 0) {
4283 if (copy_to_user(uaddr, tmpbuf, mop->size))
4284 r = -EFAULT;
4285 }
4286 break;
4287 case KVM_S390_MEMOP_LOGICAL_WRITE:
4288 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004289 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4290 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004291 break;
4292 }
4293 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4294 r = -EFAULT;
4295 break;
4296 }
4297 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4298 break;
4299 default:
4300 r = -EINVAL;
4301 }
4302
4303 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4304
4305 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4306 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4307
4308 vfree(tmpbuf);
4309 return r;
4310}
4311
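/*
 * Unlike kvm_arch_vcpu_ioctl() below, no vcpu_load() is performed
 * here, so injecting an interrupt does not have to wait for a
 * running VCPU.
 */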
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004312long kvm_arch_vcpu_async_ioctl(struct file *filp,
4313 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004314{
4315 struct kvm_vcpu *vcpu = filp->private_data;
4316 void __user *argp = (void __user *)arg;
4317
Avi Kivity93736622010-05-13 12:35:17 +03004318 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004319 case KVM_S390_IRQ: {
4320 struct kvm_s390_irq s390irq;
4321
Jens Freimann47b43c52014-11-11 20:57:06 +01004322 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004323 return -EFAULT;
4324 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004325 }
Avi Kivity93736622010-05-13 12:35:17 +03004326 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004327 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02004328 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01004329
4330 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004331 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004332 if (s390int_to_s390irq(&s390int, &s390irq))
4333 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004334 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004335 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004336 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004337 return -ENOIOCTLCMD;
4338}
4339
4340long kvm_arch_vcpu_ioctl(struct file *filp,
4341 unsigned int ioctl, unsigned long arg)
4342{
4343 struct kvm_vcpu *vcpu = filp->private_data;
4344 void __user *argp = (void __user *)arg;
4345 int idx;
4346 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004347
4348 vcpu_load(vcpu);
4349
4350 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004351 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004352 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004353 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004354 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004355 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004356 case KVM_S390_SET_INITIAL_PSW: {
4357 psw_t psw;
4358
Avi Kivitybc923cc2010-05-13 12:21:46 +03004359 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004360 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004361 break;
4362 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4363 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004364 }
4365 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03004366 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4367 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004368 case KVM_SET_ONE_REG:
4369 case KVM_GET_ONE_REG: {
4370 struct kvm_one_reg reg;
4371 r = -EFAULT;
4372 if (copy_from_user(&reg, argp, sizeof(reg)))
4373 break;
4374 if (ioctl == KVM_SET_ONE_REG)
4375 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4376 else
4377 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4378 break;
4379 }
Carsten Otte27e03932012-01-04 10:25:21 +01004380#ifdef CONFIG_KVM_S390_UCONTROL
4381 case KVM_S390_UCAS_MAP: {
4382 struct kvm_s390_ucas_mapping ucasmap;
4383
4384 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4385 r = -EFAULT;
4386 break;
4387 }
4388
4389 if (!kvm_is_ucontrol(vcpu->kvm)) {
4390 r = -EINVAL;
4391 break;
4392 }
4393
4394 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4395 ucasmap.vcpu_addr, ucasmap.length);
4396 break;
4397 }
4398 case KVM_S390_UCAS_UNMAP: {
4399 struct kvm_s390_ucas_mapping ucasmap;
4400
4401 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4402 r = -EFAULT;
4403 break;
4404 }
4405
4406 if (!kvm_is_ucontrol(vcpu->kvm)) {
4407 r = -EINVAL;
4408 break;
4409 }
4410
4411 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4412 ucasmap.length);
4413 break;
4414 }
4415#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004416 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004417 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004418 break;
4419 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004420 case KVM_ENABLE_CAP:
4421 {
4422 struct kvm_enable_cap cap;
4423 r = -EFAULT;
4424 if (copy_from_user(&cap, argp, sizeof(cap)))
4425 break;
4426 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4427 break;
4428 }
Thomas Huth41408c282015-02-06 15:01:21 +01004429 case KVM_S390_MEM_OP: {
4430 struct kvm_s390_mem_op mem_op;
4431
4432 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4433 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4434 else
4435 r = -EFAULT;
4436 break;
4437 }
Jens Freimann816c7662014-11-24 17:13:46 +01004438 case KVM_S390_SET_IRQ_STATE: {
4439 struct kvm_s390_irq_state irq_state;
4440
4441 r = -EFAULT;
4442 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4443 break;
4444 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4445 irq_state.len == 0 ||
4446 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4447 r = -EINVAL;
4448 break;
4449 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004450 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004451 r = kvm_s390_set_irq_state(vcpu,
4452 (void __user *) irq_state.buf,
4453 irq_state.len);
4454 break;
4455 }
4456 case KVM_S390_GET_IRQ_STATE: {
4457 struct kvm_s390_irq_state irq_state;
4458
4459 r = -EFAULT;
4460 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4461 break;
4462 if (irq_state.len == 0) {
4463 r = -EINVAL;
4464 break;
4465 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004466 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004467 r = kvm_s390_get_irq_state(vcpu,
4468 (__u8 __user *) irq_state.buf,
4469 irq_state.len);
4470 break;
4471 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004472 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004473 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004474 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004475
4476 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004477 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004478}
4479
Souptick Joarder1499fa82018-04-19 00:49:58 +05304480vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004481{
4482#ifdef CONFIG_KVM_S390_UCONTROL
4483 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4484 && (kvm_is_ucontrol(vcpu->kvm))) {
4485 vmf->page = virt_to_page(vcpu->arch.sie_block);
4486 get_page(vmf->page);
4487 return 0;
4488 }
4489#endif
4490 return VM_FAULT_SIGBUS;
4491}
4492
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304493int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4494 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004495{
4496 return 0;
4497}
4498
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004499/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004500int kvm_arch_prepare_memory_region(struct kvm *kvm,
4501 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004502 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004503 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004504{
Nick Wangdd2887e2013-03-25 17:22:57 +01004505	/* A few sanity checks. Memory slots have to start and end on a
4506	   segment boundary (1MB). The memory in userland may be fragmented
4507	   into various different vmas; it is okay to mmap() and munmap()
4508	   memory in this slot after doing this call, at any time. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004509
Carsten Otte598841c2011-07-24 10:48:21 +02004510 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004511 return -EINVAL;
4512
Carsten Otte598841c2011-07-24 10:48:21 +02004513 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004514 return -EINVAL;
4515
Dominik Dingela3a92c32014-12-01 17:24:42 +01004516 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4517 return -EINVAL;
4518
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004519 return 0;
4520}
4521
4522void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004523 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004524 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004525 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004526 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004527{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004528 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004529
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004530 switch (change) {
4531 case KVM_MR_DELETE:
4532 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4533 old->npages * PAGE_SIZE);
4534 break;
4535 case KVM_MR_MOVE:
4536 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4537 old->npages * PAGE_SIZE);
4538 if (rc)
4539 break;
4540 /* FALLTHROUGH */
4541 case KVM_MR_CREATE:
4542 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4543 mem->guest_phys_addr, mem->memory_size);
4544 break;
4545 case KVM_MR_FLAGS_ONLY:
4546 break;
4547 default:
4548 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4549 }
Carsten Otte598841c2011-07-24 10:48:21 +02004550 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004551 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004552 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004553}
4554
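/*
 * Build the mask that limits which host facility bits of STFLE word i
 * are propagated into kvm_s390_fac_base; the width of the mask is
 * derived from two bits of sclp.hmfai per word.
 */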
Alexander Yarygin60a37702016-04-01 15:38:57 +03004555static inline unsigned long nonhyp_mask(int i)
4556{
4557 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4558
4559 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4560}
4561
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004562void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4563{
4564 vcpu->valid_wakeup = false;
4565}
4566
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004567static int __init kvm_s390_init(void)
4568{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004569 int i;
4570
David Hildenbrand07197fd2015-01-30 16:01:38 +01004571 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004572 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004573 return -ENODEV;
4574 }
4575
Janosch Franka4499382018-07-13 11:28:31 +01004576 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004577 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004578 return -EINVAL;
4579 }
4580
Alexander Yarygin60a37702016-04-01 15:38:57 +03004581 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004582 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004583 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4584
Michael Mueller9d8d5782015-02-02 15:42:51 +01004585 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004586}
4587
4588static void __exit kvm_s390_exit(void)
4589{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004590 kvm_exit();
4591}
4592
4593module_init(kvm_s390_init);
4594module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004595
4596/*
4597 * Enable autoloading of the kvm module.
4598 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4599 * since x86 takes a different approach.
4600 */
4601#include <linux/miscdevice.h>
4602MODULE_ALIAS_MISCDEV(KVM_MINOR);
4603MODULE_ALIAS("devname:kvm");