// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
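
/*
 * Worst-case buffer size for the KVM_S390_{GET,SET}_IRQ_STATE vcpu ioctls:
 * a VCPU can have all of its local interrupts pending at once, plus one
 * SIGP emergency signal per potential sending VCPU, hence
 * KVM_MAX_VCPUS + LOCAL_IRQS entries.
 */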
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

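/*
 * Layout matches the 16-byte result of STORE CLOCK EXTENDED (STCKE): one
 * epoch-index byte, the 64 TOD-clock bits that STORE CLOCK would return,
 * and seven trailing bytes (remaining clock bits and programmable field)
 * that are not used here.
 */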
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
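
/*
 * Worked example for the carry handling above: after the negation, a
 * positive delta with e.g. epoch = -1ULL and delta = 1 wraps epoch to 0;
 * "scb->epoch < delta" detects that unsigned carry and bumps the epoch
 * index. For a negative delta, delta_idx pre-subtracts one from the index
 * and the same check adds it back whenever no borrow actually occurred.
 */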

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
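
/*
 * PERFORM LOCKED OPERATION with bit 0x100 set in the function code acts as
 * a "test bit" query: no operation is performed, condition code 0 merely
 * signals that the function code in the low byte is installed.
 */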

static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}
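
/*
 * Query helper for instructions like SORTL and DFLTCC below: function
 * code 0 in r0 requests the query function, which stores a 32-byte
 * availability bitmap at the address in r1. The raw ".insn rrf" encoding
 * keeps this buildable with assemblers that lack the new mnemonics.
 */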

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
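
/*
 * Note: the loop above advances in segment-sized steps; _PAGE_ENTRIES (256)
 * is the number of 4k pages per 1m segment, and gmap_sync_dirty_log_pmd()
 * retrieves the dirty state for a whole segment at a time, so huge page
 * backed guests need only one lookup per segment.
 */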

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
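
/*
 * Illustrative userspace sketch (not kernel code), assuming vm_fd was
 * obtained via KVM_CREATE_VM and slot 0 has dirty logging enabled:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap_buf,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */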

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
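
/*
 * Illustrative userspace sketch (not kernel code) for enabling one of the
 * capabilities handled above on a VM fd:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */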

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
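
/*
 * With CMMA in use, starting migration marks every page of the active
 * memslots dirty in the (otherwise unused) second bitmap half, so that
 * userspace fetches the storage attributes of each page at least once via
 * KVM_S390_GET_CMMA_BITS before relying on incremental updates.
 */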

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}
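
/*
 * Illustrative userspace sketch (not kernel code): migration mode is
 * toggled through the KVM_S390_VM_MIGRATION attribute group on the VM fd:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */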
1065
1066static int kvm_s390_vm_get_migration(struct kvm *kvm,
1067 struct kvm_device_attr *attr)
1068{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001069 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001070
1071 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1072 return -ENXIO;
1073
1074 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1075 return -EFAULT;
1076 return 0;
1077}
1078
Collin L. Walling8fa16962016-07-26 15:29:44 -04001079static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1080{
1081 struct kvm_s390_vm_tod_clock gtod;
1082
1083 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1084 return -EFAULT;
1085
David Hildenbrand0e7def52018-02-07 12:46:43 +01001086 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001087 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001088 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001089
1090 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1091 gtod.epoch_idx, gtod.tod);
1092
1093 return 0;
1094}
1095
Jason J. Herne72f25022014-11-25 09:46:02 -05001096static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1097{
1098 u8 gtod_high;
1099
1100 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1101 sizeof(gtod_high)))
1102 return -EFAULT;
1103
1104 if (gtod_high != 0)
1105 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001106 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001107
1108 return 0;
1109}
1110
1111static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1112{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001113 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001114
David Hildenbrand0e7def52018-02-07 12:46:43 +01001115 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1116 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001117 return -EFAULT;
1118
David Hildenbrand0e7def52018-02-07 12:46:43 +01001119 kvm_s390_set_tod_clock(kvm, &gtod);
1120 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001121 return 0;
1122}
1123
1124static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1125{
1126 int ret;
1127
1128 if (attr->flags)
1129 return -EINVAL;
1130
1131 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001132 case KVM_S390_VM_TOD_EXT:
1133 ret = kvm_s390_set_tod_ext(kvm, attr);
1134 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001135 case KVM_S390_VM_TOD_HIGH:
1136 ret = kvm_s390_set_tod_high(kvm, attr);
1137 break;
1138 case KVM_S390_VM_TOD_LOW:
1139 ret = kvm_s390_set_tod_low(kvm, attr);
1140 break;
1141 default:
1142 ret = -ENXIO;
1143 break;
1144 }
1145 return ret;
1146}
1147
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

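/*
 * Set the guest CPU model (cpuid, IBC, facility list).  This is only
 * allowed while no vcpu exists yet; the requested IBC value is clamped
 * to the host range [lowest_ibc, unblocked_ibc] reported by the SCLP.
 */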
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else {
		ret = -EFAULT;
	}
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

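/*
 * Replace the guest's instruction subfunction blocks, i.e. what the
 * guest will see as the result of the query subfunctions of PLO, PTFF,
 * the MSA crypto functions and friends.  Like the other CPU model
 * attributes this can only be changed before the first vcpu is created.
 */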
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

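/*
 * Report the host CPU model: the raw IBC value from the SCLP and the
 * full facility mask/list from the lowcore, i.e. what the machine can
 * offer rather than what the guest currently sees.
 */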
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

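/*
 * The handlers above and below are reached via the KVM_SET_DEVICE_ATTR,
 * KVM_GET_DEVICE_ATTR and KVM_HAS_DEVICE_ATTR ioctls on the VM file
 * descriptor (see kvm_arch_vm_ioctl()).  A minimal userspace sketch,
 * assuming vm_fd is a VM fd created with KVM_CREATE_VM and tod holds
 * the new 64-bit TOD value:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */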
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

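/*
 * Probe which attribute groups and attributes this kernel supports,
 * without reading or changing any state.
 */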
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

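/*
 * Dump the guest's storage keys into a userspace buffer.  If the mm is
 * not using storage keys at all, KVM_S390_GET_SKEYS_NONE is returned
 * instead of faulting all of them in.
 */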
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

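/*
 * Restore guest storage keys.  Storage key handling is first enabled
 * for the whole mm; a key that could not be set because the page was
 * not mapped writable is retried after fixing up the user page fault.
 */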
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

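/*
 * Peek mode: report the CMMA values of consecutive pages without
 * consuming any dirty information, usable outside of migration mode.
 */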
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

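/*
 * Find the guest frame number of the next page with a set bit in the
 * CMMA dirty bitmap, starting the search at cur_gfn and walking on
 * through the following memslots.
 */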
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

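/*
 * Entry point for the s390 specific VM ioctls: floating interrupt
 * injection, the device attribute interface used above, storage key
 * migration and the CMMA migration log.
 */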
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

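/*
 * Query whether the AP extended addressing (APXA) facility is
 * installed; the QCI instruction is only issued when the AP
 * instructions are available in the first place.
 */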
static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

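/*
 * Install new AP matrix masks (adapters, usage queues and control
 * domains) into the CRYCB and kick all vcpus so that their shadow
 * CRYCBs are rebuilt.
 */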
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

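/*
 * Set up the CRYCB for a new VM: select the format and, if the MSAX3
 * facility is offered to the guest, enable AES/DEA protected key
 * handling with freshly generated random wrapping key masks.
 */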
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

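/*
 * Create the architecture specific parts of a VM: the SCA, the debug
 * feature, the CPU model defaults, the crypto setup, the floating
 * interrupt lists and - unless this is a ucontrol VM - the guest
 * address space (gmap).
 */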
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002503 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002504 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002505 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002506 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002507 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002508}
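
/*
 * Illustrative userspace counterpart (not part of this file): the "type"
 * argument checked above arrives straight from the KVM_CREATE_VM ioctl.
 * A sketch, assuming a kernel built with CONFIG_KVM_S390_UCONTROL and a
 * caller holding CAP_SYS_ADMIN; error handling is omitted:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * Passing 0 as the type creates an ordinary (kernel-controlled) VM instead.
 */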

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kvm_s390_gisa_destroy(kvm);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
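
/*
 * Illustrative note (not part of the original source): the BSCA->ESCA
 * switch above is lazy.  VCPU ids that fit into the basic SCA are accepted
 * as-is; the first id at or beyond KVM_S390_BSCA_CPU_SLOTS triggers
 * sca_switch_to_extended() exactly once, and all later VCPUs land in the
 * extended SCA.  Assuming the usual 64-entry basic SCA, a userspace loop
 * like
 *
 *	for (i = 0; i < 65; i++)
 *		ioctl(vm_fd, KVM_CREATE_VCPU, i);
 *
 * would run the first 64 creations against the BSCA and flip the VM to the
 * ESCA when id 64 is requested.
 */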

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{

	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
					CR0_INTERRUPT_KEY_SUBMASK |
					CR0_MEASUREMENT_ALERT_SUBMASK;
	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
					CR14_UNUSED_33 |
					CR14_EXTERNAL_DAMAGE_SUBMASK;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);

}
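
/*
 * Illustrative note (not part of the original source): the subfunction
 * numbers probed above are the PCKMO function codes for ECC key types -
 * to the best of our knowledge 32/33/34 correspond to encrypt-ECC-P256/
 * P384/P521 and 40/41 to encrypt-Ed25519/Ed448.  A bit is only reported
 * to the guest if both the configured CPU model and the host PCKMO
 * support that subfunction, so protected-key ECC is offered only when at
 * least one of these is present on both sides.
 */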
2914
Tony Krowiak5102ee82014-06-27 14:46:01 -04002915static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2916{
Tony Krowiake585b242018-09-25 19:16:18 -04002917 /*
2918 * If the AP instructions are not being interpreted and the MSAX3
2919 * facility is not configured for the guest, there is nothing to set up.
2920 */
2921 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002922 return;
2923
Tony Krowiake585b242018-09-25 19:16:18 -04002924 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002925 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002926 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002927 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002928
Tony Krowiake585b242018-09-25 19:16:18 -04002929 if (vcpu->kvm->arch.crypto.apie)
2930 vcpu->arch.sie_block->eca |= ECA_APIE;
2931
2932 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002933 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002934 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002935 /* ecc is also wrapped with AES key */
2936 if (kvm_has_pckmo_ecc(vcpu->kvm))
2937 vcpu->arch.sie_block->ecd |= ECD_ECC;
2938 }
2939
Tony Krowiaka374e892014-09-03 10:13:53 +02002940 if (vcpu->kvm->arch.crypto.dea_kw)
2941 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002942}
2943
Dominik Dingelb31605c2014-03-25 13:47:11 +01002944void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2945{
2946 free_page(vcpu->arch.sie_block->cbrlo);
2947 vcpu->arch.sie_block->cbrlo = 0;
2948}
2949
2950int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2951{
2952 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2953 if (!vcpu->arch.sie_block->cbrlo)
2954 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002955 return 0;
2956}
2957
Michael Mueller91520f12015-02-27 14:32:11 +01002958static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2959{
2960 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2961
Michael Mueller91520f12015-02-27 14:32:11 +01002962 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002963 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002964 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002965}
2966
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002967int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2968{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002969 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002970
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002971 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2972 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002973 CPUSTAT_STOPPED);
2974
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002975 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002976 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002977 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002978 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002979
Michael Mueller91520f12015-02-27 14:32:11 +01002980 kvm_s390_vcpu_setup_model(vcpu);
2981
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002982 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2983 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002984 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002985 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002986 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002987 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002988 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002989
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002990 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002991 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002992 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002993 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2994 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002995 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002996 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002997 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002998 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002999 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003000 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003001 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003002 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003003 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003004 vcpu->arch.sie_block->eca |= ECA_VX;
3005 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003006 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003007 if (test_kvm_facility(vcpu->kvm, 139))
3008 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003009 if (test_kvm_facility(vcpu->kvm, 156))
3010 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003011 if (vcpu->arch.sie_block->gd) {
3012 vcpu->arch.sie_block->eca |= ECA_AIV;
3013 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3014 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3015 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003016 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3017 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003018 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003019
3020 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003021 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003022 else
3023 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003024
Dominik Dingele6db1d62015-05-07 15:41:57 +02003025 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003026 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3027 if (rc)
3028 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003029 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003030 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003031 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003032
Collin Walling67d49d52018-08-31 12:51:19 -04003033 vcpu->arch.sie_block->hpid = HPID_KVM;
3034
Tony Krowiak5102ee82014-06-27 14:46:01 -04003035 kvm_s390_vcpu_crypto_setup(vcpu);
3036
Dominik Dingelb31605c2014-03-25 13:47:11 +01003037 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003038}
3039
3040struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3041 unsigned int id)
3042{
Carsten Otte4d475552011-10-18 12:27:12 +02003043 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003044 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02003045 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003046
David Hildenbrand42158252015-10-12 12:57:22 +02003047 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02003048 goto out;
3049
3050 rc = -ENOMEM;
3051
Michael Muellerb110fea2013-06-12 13:54:54 +02003052 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003053 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02003054 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003055
QingFeng Haoda72ca42017-06-07 11:41:19 +02003056 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003057 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3058 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003059 goto out_free_cpu;
3060
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003061 vcpu->arch.sie_block = &sie_page->sie_block;
3062 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3063
David Hildenbrandefed1102015-04-16 12:32:41 +02003064 /* the real guest size will always be smaller than msl */
3065 vcpu->arch.sie_block->mso = 0;
3066 vcpu->arch.sie_block->msl = sclp.hamax;
3067
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003068 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003069 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Mueller982cff42019-01-31 09:52:38 +01003070 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003071 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3072 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003073 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003074
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003075 rc = kvm_vcpu_init(vcpu, kvm, id);
3076 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003077 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01003078 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003079 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02003080 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003081
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003082 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003083out_free_sie_block:
3084 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003085out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02003086 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02003087out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003088 return ERR_PTR(rc);
3089}
3090
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003091int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3092{
David Hildenbrand9a022062014-08-05 17:40:47 +02003093 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003094}
3095
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003096bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3097{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003098 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003099}
3100
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003101void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003102{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003103 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003104 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003105}
3106
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003107void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003108{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003109 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003110}
3111
Christian Borntraeger8e236542015-04-09 13:49:04 +02003112static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3113{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003114 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003115 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003116}
3117
David Hildenbrand9ea59722018-09-25 19:16:16 -04003118bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3119{
3120 return atomic_read(&vcpu->arch.sie_block->prog20) &
3121 (PROG_BLOCK_SIE | PROG_REQUEST);
3122}
3123
Christian Borntraeger8e236542015-04-09 13:49:04 +02003124static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3125{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003126 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003127}
3128
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003129/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003130 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003131 * If the CPU is not running (e.g. waiting as idle) the function will
3132 * return immediately. */
3133void exit_sie(struct kvm_vcpu *vcpu)
3134{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003135 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003136 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003137 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3138 cpu_relax();
3139}
3140
Christian Borntraeger8e236542015-04-09 13:49:04 +02003141/* Kick a guest cpu out of SIE to process a request synchronously */
3142void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003143{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003144 kvm_make_request(req, vcpu);
3145 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003146}
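
/*
 * Illustrative use (not part of the original source): this is the pattern
 * the gmap notifier below relies on, e.g.
 *
 *	kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 *
 * sets the request bit, raises PROG_REQUEST and kicks the VCPU out of SIE;
 * kvm_s390_handle_requests() then services the request before the next
 * guest entry, and kvm_s390_vcpu_request_handled() drops PROG_REQUEST again.
 */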

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
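
/*
 * Illustrative note (not part of the original source): the prefix area
 * covers two consecutive 4K pages, so the condition above is the standard
 * closed-interval overlap test between [start, end] and
 * [prefix, prefix + 2 * PAGE_SIZE - 1]: two ranges overlap iff each one
 * starts no later than the other one ends.
 */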

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}
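
/*
 * Illustrative arithmetic (not part of the original source):
 * avg_steal_timer is kept in CPU-timer units, where 1 microsecond equals
 * 4096 units, so TICK_USEC << 12 is the length of one tick in the same
 * units.  Assuming HZ=100 (TICK_USEC = 10000) and halt_poll_max_steal = 10,
 * polling is suppressed once the average steal time per tick exceeds
 *
 *	10000 us * 4096 * 10 / 100 = 4096000 units = 1 ms
 *
 * i.e. 10 percent of the tick interval.
 */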

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
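
/*
 * Illustrative userspace counterpart (not part of this file): any of the
 * register ids handled above can be accessed through KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG on the VCPU fd.  A sketch reading the guest CPU timer,
 * error handling omitted:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */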

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}
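
/*
 * Illustrative userspace counterpart (not part of this file): note that
 * the first KVM_SET_MP_STATE call above flips the VM to user-controlled
 * CPU state, so a VMM that uses it must manage STOPPED/OPERATING itself
 * from then on.  Sketch, error handling omitted:
 *
 *	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
 */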
3484
David Hildenbrand8ad35752014-03-14 11:00:21 +01003485static bool ibs_enabled(struct kvm_vcpu *vcpu)
3486{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003487 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003488}
3489
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003490static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3491{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003492retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003493 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003494 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003495 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003496 /*
3497 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003498 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003499 * This ensures that the ipte instruction for this request has
3500 * already finished. We might race against a second unmapper that
3501 * wants to set the blocking bit. Lets just retry the request loop.
3502 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003503 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003504 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003505 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3506 kvm_s390_get_prefix(vcpu),
3507 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003508 if (rc) {
3509 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003510 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003511 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003512 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003513 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003514
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003515 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3516 vcpu->arch.sie_block->ihcpu = 0xffff;
3517 goto retry;
3518 }
3519
David Hildenbrand8ad35752014-03-14 11:00:21 +01003520 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3521 if (!ibs_enabled(vcpu)) {
3522 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003523 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003524 }
3525 goto retry;
3526 }
3527
3528 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3529 if (ibs_enabled(vcpu)) {
3530 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003531 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003532 }
3533 goto retry;
3534 }
3535
David Hildenbrand6502a342016-06-21 14:19:51 +02003536 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3537 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3538 goto retry;
3539 }
3540
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003541 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3542 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003543 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003544 * instruction manually, in order to provide additional
3545 * functionalities needed for live migration.
3546 */
3547 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3548 goto retry;
3549 }
3550
3551 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3552 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003553 * Re-enable CMM virtualization if CMMA is available and
3554 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003555 */
3556 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003557 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003558 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3559 goto retry;
3560 }
3561
David Hildenbrand0759d062014-05-13 16:54:32 +02003562 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003563 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003564 /* we left the vsie handler, nothing to do, just clear the request */
3565 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003566
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003567 return 0;
3568}
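
/*
 * Illustrative sketch, not kernel code: how other parts of kvm-s390
 * typically feed the request loop above. A requester sets a request
 * bit and kicks the vCPU out of SIE; vcpu_pre_run() then calls
 * kvm_s390_handle_requests() to process it. This mirrors the
 * DISABLE_IBS path used later in this file:
 *
 *	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 */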
3569
David Hildenbrand0e7def52018-02-07 12:46:43 +01003570void kvm_s390_set_tod_clock(struct kvm *kvm,
3571 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003572{
3573 struct kvm_vcpu *vcpu;
3574 struct kvm_s390_tod_clock_ext htod;
3575 int i;
3576
3577 mutex_lock(&kvm->lock);
3578 preempt_disable();
3579
3580 get_tod_clock_ext((char *)&htod);
3581
3582 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003583 kvm->arch.epdx = 0;
3584 if (test_kvm_facility(kvm, 139)) {
3585 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3586 if (kvm->arch.epoch > gtod->tod)
3587 kvm->arch.epdx -= 1;
3588 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003589
3590 kvm_s390_vcpu_block_all(kvm);
3591 kvm_for_each_vcpu(i, vcpu, kvm) {
3592 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3593 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3594 }
3595
3596 kvm_s390_vcpu_unblock_all(kvm);
3597 preempt_enable();
3598 mutex_unlock(&kvm->lock);
3599}
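
/*
 * Worked example with made-up numbers: the epoch is the unsigned
 * 64-bit difference between guest and host TOD. With the
 * multiple-epoch facility (139), a borrow out of the 64-bit
 * subtraction has to be propagated into the epoch index:
 *
 *	htod.tod  = 0xfff0000000000000
 *	gtod->tod = 0x0010000000000000
 *	epoch     = gtod->tod - htod.tod = 0x0020000000000000 (wrapped)
 *
 * Because epoch > gtod->tod, the subtraction wrapped around, so epdx
 * is decremented by one to account for the borrow.
 */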
3600
Thomas Huthfa576c52014-05-06 17:20:16 +02003601/**
3602 * kvm_arch_fault_in_page - fault-in guest page if necessary
3603 * @vcpu: The corresponding virtual cpu
3604 * @gpa: Guest physical address
3605 * @writable: Whether the page should be writable or not
3606 *
3607 * Make sure that a guest page has been faulted-in on the host.
3608 *
3609 * Return: Zero on success, negative error code otherwise.
3610 */
3611long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003612{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003613 return gmap_fault(vcpu->arch.gmap, gpa,
3614 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003615}
3616
Dominik Dingel3c038e62013-10-07 17:11:48 +02003617static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3618 unsigned long token)
3619{
3620 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003621 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003622
3623 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003624 irq.u.ext.ext_params2 = token;
3625 irq.type = KVM_S390_INT_PFAULT_INIT;
3626 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003627 } else {
3628 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003629 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003630 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3631 }
3632}
3633
3634void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3635 struct kvm_async_pf *work)
3636{
3637 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3638 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3639}
3640
3641void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3642 struct kvm_async_pf *work)
3643{
3644 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3645 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3646}
3647
3648void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3649 struct kvm_async_pf *work)
3650{
3651 /* s390 will always inject the page directly */
3652}
3653
3654bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3655{
3656 /*
3657 * s390 will always inject the page directly,
3658 * but we still want check_async_completion to clean up
3659 */
3660 return true;
3661}
3662
3663static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3664{
3665 hva_t hva;
3666 struct kvm_arch_async_pf arch;
3667 int rc;
3668
3669 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3670 return 0;
3671 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3672 vcpu->arch.pfault_compare)
3673 return 0;
3674 if (psw_extint_disabled(vcpu))
3675 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003676 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003677 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003678 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003679 return 0;
3680 if (!vcpu->arch.gmap->pfault_enabled)
3681 return 0;
3682
Heiko Carstens81480cc2014-01-01 16:36:07 +01003683 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3684 hva += current->thread.gmap_addr & ~PAGE_MASK;
3685 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003686 return 0;
3687
3688 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3689 return rc;
3690}
3691
Thomas Huth3fb4c402013-09-12 10:33:43 +02003692static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003693{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003694 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003695
Dominik Dingel3c038e62013-10-07 17:11:48 +02003696 /*
3697 * On s390 notifications for arriving pages will be delivered directly
3698 * to the guest, but the housekeeping for completed pfaults is
3699 * handled outside the worker.
3700 */
3701 kvm_check_async_pf_completion(vcpu);
3702
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003703 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3704 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003705
3706 if (need_resched())
3707 schedule();
3708
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003709 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003710 s390_handle_mcck();
3711
Jens Freimann79395032014-04-17 10:10:30 +02003712 if (!kvm_is_ucontrol(vcpu->kvm)) {
3713 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3714 if (rc)
3715 return rc;
3716 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003717
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003718 rc = kvm_s390_handle_requests(vcpu);
3719 if (rc)
3720 return rc;
3721
David Hildenbrand27291e22014-01-23 12:26:52 +01003722 if (guestdbg_enabled(vcpu)) {
3723 kvm_s390_backup_guest_per_regs(vcpu);
3724 kvm_s390_patch_guest_per_regs(vcpu);
3725 }
3726
Michael Mueller9f30f622019-01-31 09:52:44 +01003727 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3728
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003729 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003730 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3731 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3732 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003733
Thomas Huth3fb4c402013-09-12 10:33:43 +02003734 return 0;
3735}
3736
Thomas Huth492d8642015-02-10 16:11:01 +01003737static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3738{
David Hildenbrand56317922016-01-12 17:37:58 +01003739 struct kvm_s390_pgm_info pgm_info = {
3740 .code = PGM_ADDRESSING,
3741 };
3742 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003743 int rc;
3744
3745 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3746 trace_kvm_s390_sie_fault(vcpu);
3747
3748 /*
3749 * We want to inject an addressing exception, which is defined as a
3750 * suppressing or terminating exception. However, since we came here
3751 * by a DAT access exception, the PSW still points to the faulting
3752 * instruction since DAT exceptions are nullifying. So we've got
3753 * to look up the current opcode to get the length of the instruction
3754 * to be able to forward the PSW.
3755 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003756 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003757 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003758 if (rc < 0) {
3759 return rc;
3760 } else if (rc) {
3761 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3762 * Forward by arbitrary ilc, injection will take care of
3763 * nullification if necessary.
3764 */
3765 pgm_info = vcpu->arch.pgm;
3766 ilen = 4;
3767 }
David Hildenbrand56317922016-01-12 17:37:58 +01003768 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3769 kvm_s390_forward_psw(vcpu, ilen);
3770 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003771}
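
/*
 * Worked example, assuming the standard s390 instruction-length
 * encoding: the two leftmost bits of the first opcode byte give the
 * instruction length (00 -> 2 bytes, 01 and 10 -> 4 bytes,
 * 11 -> 6 bytes). For an opcode byte of 0xb2 (top bits 10),
 * insn_length() yields 4, so the PSW is forwarded by 4 before the
 * addressing exception is injected.
 */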
3772
Thomas Huth3fb4c402013-09-12 10:33:43 +02003773static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3774{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003775 struct mcck_volatile_info *mcck_info;
3776 struct sie_page *sie_page;
3777
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003778 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3779 vcpu->arch.sie_block->icptcode);
3780 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3781
David Hildenbrand27291e22014-01-23 12:26:52 +01003782 if (guestdbg_enabled(vcpu))
3783 kvm_s390_restore_guest_per_regs(vcpu);
3784
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003785 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3786 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003787
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003788 if (exit_reason == -EINTR) {
3789 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3790 sie_page = container_of(vcpu->arch.sie_block,
3791 struct sie_page, sie_block);
3792 mcck_info = &sie_page->mcck_info;
3793 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3794 return 0;
3795 }
3796
David Hildenbrand71f116b2015-10-19 16:24:28 +02003797 if (vcpu->arch.sie_block->icptcode > 0) {
3798 int rc = kvm_handle_sie_intercept(vcpu);
3799
3800 if (rc != -EOPNOTSUPP)
3801 return rc;
3802 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3803 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3804 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3805 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3806 return -EREMOTE;
3807 } else if (exit_reason != -EFAULT) {
3808 vcpu->stat.exit_null++;
3809 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003810 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3811 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3812 vcpu->run->s390_ucontrol.trans_exc_code =
3813 current->thread.gmap_addr;
3814 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003815 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003816 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003817 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003818 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003819 if (kvm_arch_setup_async_pf(vcpu))
3820 return 0;
3821 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003822 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003823 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003824}
3825
3826static int __vcpu_run(struct kvm_vcpu *vcpu)
3827{
3828 int rc, exit_reason;
3829
Thomas Huth800c1062013-09-12 10:33:45 +02003830 /*
3831 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3832 * ning the guest), so that memslots (and other stuff) are protected
3833 */
3834 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3835
Thomas Hutha76ccff2013-09-12 10:33:44 +02003836 do {
3837 rc = vcpu_pre_run(vcpu);
3838 if (rc)
3839 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003840
Thomas Huth800c1062013-09-12 10:33:45 +02003841 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003842 /*
3843 * As PF_VCPU will be used in the fault handler, there must be
3844 * no uaccess between guest_enter and guest_exit.
3845 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003846 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003847 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003848 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003849 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003850 exit_reason = sie64a(vcpu->arch.sie_block,
3851 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003852 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003853 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003854 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003855 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003856 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003857
Thomas Hutha76ccff2013-09-12 10:33:44 +02003858 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003859 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003860
Thomas Huth800c1062013-09-12 10:33:45 +02003861 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003862 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003863}
3864
David Hildenbrandb028ee32014-07-17 10:47:43 +02003865static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3866{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003867 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003868 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003869
3870 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003871 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003872 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3873 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3874 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3875 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3876 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3877 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003878 /* some control register changes require a tlb flush */
3879 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003880 }
3881 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003882 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003883 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3884 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3885 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3886 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3887 }
3888 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3889 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3890 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3891 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003892 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3893 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003894 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003895 /*
3896 * If userspace sets the riccb (e.g. after migration) to a valid state,
3897 * we should enable RI here instead of doing the lazy enablement.
3898 */
3899 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003900 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003901 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003902 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003903 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003904 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003905 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003906 /*
3907 * If userspace sets the gscb (e.g. after migration) to non-zero,
3908 * we should enable GS here instead of doing the lazy enablement.
3909 */
3910 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3911 test_kvm_facility(vcpu->kvm, 133) &&
3912 gscb->gssm &&
3913 !vcpu->arch.gs_enabled) {
3914 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3915 vcpu->arch.sie_block->ecb |= ECB_GS;
3916 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3917 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003918 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003919 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3920 test_kvm_facility(vcpu->kvm, 82)) {
3921 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3922 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3923 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003924 save_access_regs(vcpu->arch.host_acrs);
3925 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003926 /* save host (userspace) fprs/vrs */
3927 save_fpu_regs();
3928 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3929 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3930 if (MACHINE_HAS_VX)
3931 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3932 else
3933 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3934 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3935 if (test_fp_ctl(current->thread.fpu.fpc))
3936 /* User space provided an invalid FPC, let's clear it */
3937 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003938 if (MACHINE_HAS_GS) {
3939 preempt_disable();
3940 __ctl_set_bit(2, 4);
3941 if (current->thread.gs_cb) {
3942 vcpu->arch.host_gscb = current->thread.gs_cb;
3943 save_gs_cb(vcpu->arch.host_gscb);
3944 }
3945 if (vcpu->arch.gs_enabled) {
3946 current->thread.gs_cb = (struct gs_cb *)
3947 &vcpu->run->s.regs.gscb;
3948 restore_gs_cb(current->thread.gs_cb);
3949 }
3950 preempt_enable();
3951 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003952 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003953
David Hildenbrandb028ee32014-07-17 10:47:43 +02003954 kvm_run->kvm_dirty_regs = 0;
3955}
3956
3957static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3958{
3959 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3960 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3961 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3962 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003963 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003964 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3965 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3966 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3967 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3968 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3969 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3970 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003971 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003972 save_access_regs(vcpu->run->s.regs.acrs);
3973 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003974 /* Save guest register state */
3975 save_fpu_regs();
3976 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3977 /* Restore will be done lazily at return */
3978 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3979 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003980 if (MACHINE_HAS_GS) {
3981 __ctl_set_bit(2, 4);
3982 if (vcpu->arch.gs_enabled)
3983 save_gs_cb(current->thread.gs_cb);
3984 preempt_disable();
3985 current->thread.gs_cb = vcpu->arch.host_gscb;
3986 restore_gs_cb(vcpu->arch.host_gscb);
3987 preempt_enable();
3988 if (!vcpu->arch.host_gscb)
3989 __ctl_clear_bit(2, 4);
3990 vcpu->arch.host_gscb = NULL;
3991 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003992 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003993}
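
/*
 * Illustrative sketch of the userspace side of this sync/store pair,
 * not kernel code: a VMM flags the register blocks it changed in
 * kvm_run->kvm_dirty_regs before KVM_RUN; sync_regs() then pushes them
 * into the SIE block and store_regs() mirrors them back afterwards.
 * vcpu_fd, run and new_prefix are assumptions, with run coming from
 * the usual KVM_CREATE_VCPU + mmap() setup.
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */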
3994
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003995int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3996{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003997 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003998
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003999 if (kvm_run->immediate_exit)
4000 return -EINTR;
4001
Christoffer Dallaccb7572017-12-04 21:35:25 +01004002 vcpu_load(vcpu);
4003
David Hildenbrand27291e22014-01-23 12:26:52 +01004004 if (guestdbg_exit_pending(vcpu)) {
4005 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004006 rc = 0;
4007 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004008 }
4009
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004010 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004011
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004012 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4013 kvm_s390_vcpu_start(vcpu);
4014 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004015 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004016 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004017 rc = -EINVAL;
4018 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004019 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004020
David Hildenbrandb028ee32014-07-17 10:47:43 +02004021 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004022 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004023
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004024 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004025 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004026
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004027 if (signal_pending(current) && !rc) {
4028 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004029 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004030 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004031
David Hildenbrand27291e22014-01-23 12:26:52 +01004032 if (guestdbg_exit_pending(vcpu) && !rc) {
4033 kvm_s390_prepare_debug_exit(vcpu);
4034 rc = 0;
4035 }
4036
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004037 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004038 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004039 rc = 0;
4040 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004041
David Hildenbranddb0758b2016-02-15 09:42:25 +01004042 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004043 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004044
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004045 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004046
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004047 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004048out:
4049 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004050 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004051}
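
/*
 * Illustrative sketch, not kernel code: a minimal userspace run loop
 * against this ioctl. A pending signal surfaces as errno EINTR with
 * exit_reason KVM_EXIT_INTR; -EREMOTE is turned into 0 above with
 * kvm_run already prepared (e.g. KVM_EXIT_S390_SIEIC). vcpu_fd and
 * run are assumptions, and handle_sieic() is a hypothetical VMM
 * helper.
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */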
4052
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004053/*
4054 * store status at address
4055 * we have two special cases:
4056 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4057 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4058 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004059int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004060{
Carsten Otte092670c2011-07-24 10:48:22 +02004061 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004062 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004063 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004064 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004065 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004066
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004067 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004068 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4069 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004070 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004071 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004072 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4073 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004074 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004075 gpa = px;
4076 } else
4077 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004078
4079 /* manually convert vector registers if necessary */
4080 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004081 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004082 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4083 fprs, 128);
4084 } else {
4085 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004086 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004087 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004088 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004089 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004090 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004091 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004092 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004093 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004094 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004095 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004096 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004097 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004098 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004099 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004100 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004101 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004102 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004103 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004104 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004105 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004106 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004107 &vcpu->arch.sie_block->gcr, 128);
4108 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004109}
4110
Thomas Huthe8798922013-11-06 15:46:33 +01004111int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4112{
4113 /*
4114 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004115 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004116 * them into the save area
4117 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004118 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004119 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004120 save_access_regs(vcpu->run->s.regs.acrs);
4121
4122 return kvm_s390_store_status_unloaded(vcpu, addr);
4123}
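
/*
 * Illustrative sketch, not kernel code: the KVM_S390_STORE_STATUS
 * vcpu ioctl passes the target address directly as its argument; the
 * two special values described above select the fixed save area at
 * absolute 0x1200 or the prefix area. vcpu_fd is an assumption.
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */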
4124
David Hildenbrand8ad35752014-03-14 11:00:21 +01004125static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4126{
4127 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004128 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004129}
4130
4131static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4132{
4133 unsigned int i;
4134 struct kvm_vcpu *vcpu;
4135
4136 kvm_for_each_vcpu(i, vcpu, kvm) {
4137 __disable_ibs_on_vcpu(vcpu);
4138 }
4139}
4140
4141static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4142{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004143 if (!sclp.has_ibs)
4144 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004145 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004146 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004147}
4148
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004149void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4150{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004151 int i, online_vcpus, started_vcpus = 0;
4152
4153 if (!is_vcpu_stopped(vcpu))
4154 return;
4155
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004156 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004157 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004158 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004159 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4160
4161 for (i = 0; i < online_vcpus; i++) {
4162 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4163 started_vcpus++;
4164 }
4165
4166 if (started_vcpus == 0) {
4167 /* we're the only active VCPU -> speed it up */
4168 __enable_ibs_on_vcpu(vcpu);
4169 } else if (started_vcpus == 1) {
4170 /*
4171 * As we are starting a second VCPU, we have to disable
4172 * the IBS facility on all VCPUs to remove potentially
4173 * outstanding ENABLE requests.
4174 */
4175 __disable_ibs_on_all_vcpus(vcpu->kvm);
4176 }
4177
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004178 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004179 /*
4180 * Another VCPU might have used IBS while we were offline.
4181 * Let's play safe and flush the VCPU at startup.
4182 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004183 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004184 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004185 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004186}
4187
4188void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4189{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004190 int i, online_vcpus, started_vcpus = 0;
4191 struct kvm_vcpu *started_vcpu = NULL;
4192
4193 if (is_vcpu_stopped(vcpu))
4194 return;
4195
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004196 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004197 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004198 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004199 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4200
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004201 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004202 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004203
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004204 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004205 __disable_ibs_on_vcpu(vcpu);
4206
4207 for (i = 0; i < online_vcpus; i++) {
4208 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4209 started_vcpus++;
4210 started_vcpu = vcpu->kvm->vcpus[i];
4211 }
4212 }
4213
4214 if (started_vcpus == 1) {
4215 /*
4216 * As we only have one VCPU left, we want to enable the
4217 * IBS facility for that VCPU to speed it up.
4218 */
4219 __enable_ibs_on_vcpu(started_vcpu);
4220 }
4221
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004222 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004223 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004224}
4225
Cornelia Huckd6712df2012-12-20 15:32:11 +01004226static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4227 struct kvm_enable_cap *cap)
4228{
4229 int r;
4230
4231 if (cap->flags)
4232 return -EINVAL;
4233
4234 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004235 case KVM_CAP_S390_CSS_SUPPORT:
4236 if (!vcpu->kvm->arch.css_support) {
4237 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004238 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004239 trace_kvm_s390_enable_css(vcpu->kvm);
4240 }
4241 r = 0;
4242 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004243 default:
4244 r = -EINVAL;
4245 break;
4246 }
4247 return r;
4248}
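
/*
 * Illustrative sketch, not kernel code: enabling the one per-vcpu
 * capability handled above from userspace. cap.flags must be zero or
 * the ioctl fails with EINVAL. vcpu_fd is an assumption.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */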
4249
Thomas Huth41408c282015-02-06 15:01:21 +01004250static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4251 struct kvm_s390_mem_op *mop)
4252{
4253 void __user *uaddr = (void __user *)mop->buf;
4254 void *tmpbuf = NULL;
4255 int r, srcu_idx;
4256 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4257 | KVM_S390_MEMOP_F_CHECK_ONLY;
4258
4259 if (mop->flags & ~supported_flags)
4260 return -EINVAL;
4261
4262 if (mop->size > MEM_OP_MAX_SIZE)
4263 return -E2BIG;
4264
4265 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4266 tmpbuf = vmalloc(mop->size);
4267 if (!tmpbuf)
4268 return -ENOMEM;
4269 }
4270
4271 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4272
4273 switch (mop->op) {
4274 case KVM_S390_MEMOP_LOGICAL_READ:
4275 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004276 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4277 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004278 break;
4279 }
4280 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4281 if (r == 0) {
4282 if (copy_to_user(uaddr, tmpbuf, mop->size))
4283 r = -EFAULT;
4284 }
4285 break;
4286 case KVM_S390_MEMOP_LOGICAL_WRITE:
4287 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004288 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4289 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004290 break;
4291 }
4292 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4293 r = -EFAULT;
4294 break;
4295 }
4296 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4297 break;
4298 default:
4299 r = -EINVAL;
4300 }
4301
4302 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4303
4304 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4305 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4306
4307 vfree(tmpbuf);
4308 return r;
4309}
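
/*
 * Illustrative sketch, not kernel code: reading 256 bytes of guest
 * logical memory through the handler above. buf carries a pointer
 * into the VMM's address space, ar selects the access register used
 * for logical address translation, and flags left at zero requests a
 * full read rather than a check-only probe. guest_addr and vcpu_fd
 * are assumptions.
 *
 *	__u8 data[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(data),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)data,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */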
4310
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004311long kvm_arch_vcpu_async_ioctl(struct file *filp,
4312 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004313{
4314 struct kvm_vcpu *vcpu = filp->private_data;
4315 void __user *argp = (void __user *)arg;
4316
Avi Kivity93736622010-05-13 12:35:17 +03004317 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004318 case KVM_S390_IRQ: {
4319 struct kvm_s390_irq s390irq;
4320
Jens Freimann47b43c52014-11-11 20:57:06 +01004321 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004322 return -EFAULT;
4323 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004324 }
Avi Kivity93736622010-05-13 12:35:17 +03004325 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004326 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02004327 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01004328
4329 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004330 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004331 if (s390int_to_s390irq(&s390int, &s390irq))
4332 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004333 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004334 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004335 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004336 return -ENOIOCTLCMD;
4337}
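
/*
 * Illustrative sketch, not kernel code: injecting an interrupt
 * through the KVM_S390_IRQ path above. The emergency-signal type and
 * its payload field come from the uapi definitions; vcpu_fd and
 * src_cpu_addr are assumptions.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = src_cpu_addr,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */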
4338
4339long kvm_arch_vcpu_ioctl(struct file *filp,
4340 unsigned int ioctl, unsigned long arg)
4341{
4342 struct kvm_vcpu *vcpu = filp->private_data;
4343 void __user *argp = (void __user *)arg;
4344 int idx;
4345 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004346
4347 vcpu_load(vcpu);
4348
4349 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004350 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004351 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004352 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004353 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004354 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004355 case KVM_S390_SET_INITIAL_PSW: {
4356 psw_t psw;
4357
Avi Kivitybc923cc2010-05-13 12:21:46 +03004358 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004359 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004360 break;
4361 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4362 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004363 }
4364 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03004365 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4366 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004367 case KVM_SET_ONE_REG:
4368 case KVM_GET_ONE_REG: {
4369 struct kvm_one_reg reg;
4370 r = -EFAULT;
4371 if (copy_from_user(&reg, argp, sizeof(reg)))
4372 break;
4373 if (ioctl == KVM_SET_ONE_REG)
4374 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4375 else
4376 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4377 break;
4378 }
Carsten Otte27e03932012-01-04 10:25:21 +01004379#ifdef CONFIG_KVM_S390_UCONTROL
4380 case KVM_S390_UCAS_MAP: {
4381 struct kvm_s390_ucas_mapping ucasmap;
4382
4383 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4384 r = -EFAULT;
4385 break;
4386 }
4387
4388 if (!kvm_is_ucontrol(vcpu->kvm)) {
4389 r = -EINVAL;
4390 break;
4391 }
4392
4393 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4394 ucasmap.vcpu_addr, ucasmap.length);
4395 break;
4396 }
4397 case KVM_S390_UCAS_UNMAP: {
4398 struct kvm_s390_ucas_mapping ucasmap;
4399
4400 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4401 r = -EFAULT;
4402 break;
4403 }
4404
4405 if (!kvm_is_ucontrol(vcpu->kvm)) {
4406 r = -EINVAL;
4407 break;
4408 }
4409
4410 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4411 ucasmap.length);
4412 break;
4413 }
4414#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004415 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004416 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004417 break;
4418 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004419 case KVM_ENABLE_CAP:
4420 {
4421 struct kvm_enable_cap cap;
4422 r = -EFAULT;
4423 if (copy_from_user(&cap, argp, sizeof(cap)))
4424 break;
4425 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4426 break;
4427 }
Thomas Huth41408c282015-02-06 15:01:21 +01004428 case KVM_S390_MEM_OP: {
4429 struct kvm_s390_mem_op mem_op;
4430
4431 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4432 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4433 else
4434 r = -EFAULT;
4435 break;
4436 }
Jens Freimann816c7662014-11-24 17:13:46 +01004437 case KVM_S390_SET_IRQ_STATE: {
4438 struct kvm_s390_irq_state irq_state;
4439
4440 r = -EFAULT;
4441 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4442 break;
4443 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4444 irq_state.len == 0 ||
4445 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4446 r = -EINVAL;
4447 break;
4448 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004449 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004450 r = kvm_s390_set_irq_state(vcpu,
4451 (void __user *) irq_state.buf,
4452 irq_state.len);
4453 break;
4454 }
4455 case KVM_S390_GET_IRQ_STATE: {
4456 struct kvm_s390_irq_state irq_state;
4457
4458 r = -EFAULT;
4459 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4460 break;
4461 if (irq_state.len == 0) {
4462 r = -EINVAL;
4463 break;
4464 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004465 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004466 r = kvm_s390_get_irq_state(vcpu,
4467 (__u8 __user *) irq_state.buf,
4468 irq_state.len);
4469 break;
4470 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004471 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004472 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004473 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004474
4475 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004476 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004477}
4478
Souptick Joarder1499fa82018-04-19 00:49:58 +05304479vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004480{
4481#ifdef CONFIG_KVM_S390_UCONTROL
4482 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4483 && (kvm_is_ucontrol(vcpu->kvm))) {
4484 vmf->page = virt_to_page(vcpu->arch.sie_block);
4485 get_page(vmf->page);
4486 return 0;
4487 }
4488#endif
4489 return VM_FAULT_SIGBUS;
4490}
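
/*
 * Illustrative sketch, not kernel code: for ucontrol VMs the fault
 * handler above exposes the SIE control block, so a VMM can map it at
 * the fixed page offset. page_size and vcpu_fd are assumptions.
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 */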
4491
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304492int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4493 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004494{
4495 return 0;
4496}
4497
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004498/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004499int kvm_arch_prepare_memory_region(struct kvm *kvm,
4500 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004501 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004502 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004503{
Nick Wangdd2887e2013-03-25 17:22:57 +01004504 /* A few sanity checks. Memory slots have to start and end on a segment
4505 boundary (1MB). The memory in userland may be fragmented into various
4506 different vmas. It is okay to mmap() and munmap() in this slot at any
4507 time after doing this call */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004508
Carsten Otte598841c2011-07-24 10:48:21 +02004509 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004510 return -EINVAL;
4511
Carsten Otte598841c2011-07-24 10:48:21 +02004512 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004513 return -EINVAL;
4514
Dominik Dingela3a92c32014-12-01 17:24:42 +01004515 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4516 return -EINVAL;
4517
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004518 return 0;
4519}
4520
4521void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004522 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004523 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004524 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004525 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004526{
Carsten Ottef7850c92011-07-24 10:48:23 +02004527 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004528
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01004529 /* If the basics of the memslot do not change, we do not want
4530 * to update the gmap. Every update causes several unnecessary
4531 * segment translation exceptions. This is usually handled just
4532 * fine by the normal fault handler + gmap, but it will also
4533 * cause faults on the prefix page of running guest CPUs.
4534 */
4535 if (old->userspace_addr == mem->userspace_addr &&
4536 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
4537 old->npages * PAGE_SIZE == mem->memory_size)
4538 return;
Carsten Otte598841c2011-07-24 10:48:21 +02004539
4540 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4541 mem->guest_phys_addr, mem->memory_size);
4542 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004543 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004544 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004545}
4546
Alexander Yarygin60a37702016-04-01 15:38:57 +03004547static inline unsigned long nonhyp_mask(int i)
4548{
4549 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4550
4551 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4552}
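
/*
 * Worked example: nonhyp_mask() extracts two bits per facility word
 * from sclp.hmfai and shifts the 48-bit base mask right by 16 bits
 * per step:
 *
 *	nonhyp_fai = 0  ->  0x0000ffffffffffff
 *	nonhyp_fai = 1  ->  0x00000000ffffffff
 *	nonhyp_fai = 2  ->  0x000000000000ffff
 *	nonhyp_fai = 3  ->  0x0000000000000000
 */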
4553
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004554void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4555{
4556 vcpu->valid_wakeup = false;
4557}
4558
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004559static int __init kvm_s390_init(void)
4560{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004561 int i;
4562
David Hildenbrand07197fd2015-01-30 16:01:38 +01004563 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004564 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004565 return -ENODEV;
4566 }
4567
Janosch Franka4499382018-07-13 11:28:31 +01004568 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004569 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004570 return -EINVAL;
4571 }
4572
Alexander Yarygin60a37702016-04-01 15:38:57 +03004573 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004574 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004575 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4576
Michael Mueller9d8d5782015-02-02 15:42:51 +01004577 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004578}
4579
4580static void __exit kvm_s390_exit(void)
4581{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004582 kvm_exit();
4583}
4584
4585module_init(kvm_s390_init);
4586module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004587
4588/*
4589 * Enable autoloading of the kvm module.
4590 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4591 * since x86 takes a different approach.
4592 */
4593#include <linux/miscdevice.h>
4594MODULE_ALIAS_MISCDEV(KVM_MINOR);
4595MODULE_ALIAS("devname:kvm");