// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;
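/*
 * Layout note (a sketch, not an architected definition): this mirrors how
 * the 16-byte result of get_tod_clock_ext() is consumed in
 * kvm_s390_get_tod_clock() below, i.e. one epoch-index byte followed by the
 * 64-bit TOD value; the trailing bytes are unused here.
 */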
170
David Hildenbranda411edf2016-02-02 15:41:22 +0100171/* allow nested virtualization in KVM (if enabled by user space) */
172static int nested;
173module_param(nested, int, S_IRUGO);
174MODULE_PARM_DESC(nested, "Nested virtualization support");
175
Janosch Franka4499382018-07-13 11:28:31 +0100176/* allow 1m huge page guest backing, if !nested */
177static int hpage;
178module_param(hpage, int, 0444);
179MODULE_PARM_DESC(hpage, "1m huge page backing support");
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100180
Christian Borntraeger8b905d22019-03-05 05:30:02 -0500181/* maximum percentage of steal time for polling. >100 is treated like 100 */
182static u8 halt_poll_max_steal = 10;
183module_param(halt_poll_max_steal, byte, 0644);
Wei Yongjunb41fb522019-05-04 06:51:45 +0000184MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
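
/*
 * Usage note (illustrative, values are examples only): like any module
 * parameter these can be given at load time, e.g. "modprobe kvm nested=1",
 * and halt_poll_max_steal (mode 0644) can also be changed at runtime via
 * /sys/module/kvm/parameters/halt_poll_max_steal.
 */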

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it will require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16
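/* 16 double words provide room for 16 * 64 = 1024 facility bits. */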

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
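
/*
 * Worked example (illustrative): if the TOD jumped forward by 1, delta
 * becomes -1 (all bits set) and delta_idx becomes 0xff. Adding delta to
 * scb->epoch produces an unsigned carry-out exactly when the old epoch was
 * non-zero; "scb->epoch < delta" detects that carry and re-adds it to
 * scb->epdx, so (epdx:epoch) behaves like a single wide signed accumulator.
 */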

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
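
/*
 * plo_test_bit() reports whether PERFORM LOCKED OPERATION function code
 * "nr" is installed: bit 0x100 in r0 selects the "test bit" query and
 * condition code 0 means the function is available. It is used below by
 * kvm_s390_cpu_feat_init() to build the plo subfunction bitmap.
 */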

static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}
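
/*
 * __insn32_query() runs the given instruction with function code 0 (the
 * query function, r0 == 0), which stores a 32-byte availability bitmap at
 * the address in r1; used below for the SORTL and DFLTCC opcodes.
 */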

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}
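
/*
 * Userspace view (sketch, not part of this file): the values above are
 * queried with the generic KVM_CHECK_EXTENSION ioctl, e.g.
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 * where a positive result both signals availability and carries the
 * maximum transfer size for KVM_S390_MEM_OP.
 */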

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
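
/*
 * Each iteration above covers one segment of _PAGE_ENTRIES 4 KB pages
 * (256 pages, i.e. 1 MB, assuming the usual s390 value), matching the PMD
 * granularity of gmap_sync_dirty_log_pmd().
 */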

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
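
/*
 * Userspace usage (sketch): the log is retrieved with the generic
 * KVM_GET_DIRTY_LOG ioctl, e.g.
 *
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = buf };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * with buf sized to one bit per page of the slot; the handler above first
 * pulls dirty bits out of the gmap before common code copies them out.
 */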

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
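
/*
 * Userspace usage (sketch): migration mode is driven through the VM
 * attribute interface, e.g.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * and KVM_S390_VM_MIGRATION_STATUS read via KVM_GET_DEVICE_ATTR (attr.addr
 * pointing to a u64) returns the current migration_mode value.
 */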
1084
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

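/*
 * Compute the guest view of the TOD clock: host TOD plus the per-VM epoch.
 * When the addition wraps around and the multiple-epoch facility (139) is
 * active, the carry is propagated into the epoch index.
 */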
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

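/*
 * Set the guest CPU model (cpuid, IBC and facility list). This is only
 * possible while no vcpu exists; the requested IBC value is clamped to
 * the range that the underlying machine actually supports.
 */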
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

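/*
 * Set the guest-visible query results of the installed instruction
 * subfunctions (PLO, PTFF, the crypto, sort and compression instructions).
 * Like the other CPU model attributes, this is only allowed before the
 * first vcpu has been created.
 */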
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

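/*
 * The "processor" getters below report what is currently enabled for this
 * guest, while the "machine" getters report the maximum the host could
 * make available.
 */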
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

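/*
 * Handler for KVM_SET_DEVICE_ATTR on the VM file descriptor. A minimal
 * userspace sketch (vm_fd being an already open VM fd is an assumption of
 * the example):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		perror("KVM_SET_DEVICE_ATTR");
 */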
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

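/*
 * Copy the storage keys of args->count guest pages, starting at
 * args->start_gfn, into the userspace buffer at args->skeydata_addr.
 * Returns KVM_S390_GET_SKEYS_NONE when the guest has never used storage
 * keys, so userspace can skip them entirely during migration.
 */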
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

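/*
 * Find the guest frame number of the next page with its CMMA dirty bit
 * set, starting the search at cur_gfn. The memslot array is sorted by
 * descending base_gfn, and the search wraps back to the lowest guest
 * address once it runs past the highest slot.
 */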
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

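/*
 * Entry point for the VM-scoped ioctls. As an illustration, a migration
 * tool would typically drain the CMMA log roughly like this (a sketch;
 * vm_fd, buf and bufsize are assumptions of the example):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = bufsize,
 *		.flags = 0,
 *		.values = (__u64)buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		... consume log.count values starting at log.start_gfn ...
 *		log.start_gfn += log.count;
 *		log.count = bufsize;
 *	} while (log.remaining);
 */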
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

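/* Query the AP configuration (QCI) to see whether APXA is installed. */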
static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

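/*
 * Install the AP adapter (apm), usage domain (aqm) and control domain
 * (adm) masks in the CRYCB. All vcpus are blocked while the masks change,
 * and a VSIE restart request afterwards makes every vcpu rebuild its
 * shadow CRYCB.
 */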
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

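/*
 * The guest cpuid is the host cpuid with the version field forced to
 * 0xff, so every KVM guest reports the same version code regardless of
 * the actual host CPU version.
 */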
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002360static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002361{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002362 struct cpuid cpuid;
2363
2364 get_cpu_id(&cpuid);
2365 cpuid.version = 0xff;
2366 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002367}
2368
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002369static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002370{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002371 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002372 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002373
Tony Krowiake585b242018-09-25 19:16:18 -04002374 if (!test_kvm_facility(kvm, 76))
2375 return;
2376
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002377 /* Enable AES/DEA protected key functions by default */
2378 kvm->arch.crypto.aes_kw = 1;
2379 kvm->arch.crypto.dea_kw = 1;
2380 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2381 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2382 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2383 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002384}
2385
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002386static void sca_dispose(struct kvm *kvm)
2387{
2388 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002389 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002390 else
2391 free_page((unsigned long)(kvm->arch.sca));
2392 kvm->arch.sca = NULL;
2393}
2394
Carsten Ottee08b9632012-01-04 10:25:20 +01002395int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002397 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002398 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002399 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002400 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002401
Carsten Ottee08b9632012-01-04 10:25:20 +01002402 rc = -EINVAL;
2403#ifdef CONFIG_KVM_S390_UCONTROL
2404 if (type & ~KVM_VM_S390_UCONTROL)
2405 goto out_err;
2406 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2407 goto out_err;
2408#else
2409 if (type)
2410 goto out_err;
2411#endif
2412
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002413 rc = s390_enable_sie();
2414 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002415 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002416
Carsten Otteb2904112011-10-18 12:27:13 +02002417 rc = -ENOMEM;
2418
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002419 if (!sclp.has_64bscao)
2420 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002421 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002422 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002423 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002424 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002425 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002426 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002427 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002428 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002429 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002430 kvm->arch.sca = (struct bsca_block *)
2431 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002432 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002433
2434 sprintf(debug_name, "kvm-%u", current->pid);
2435
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002436 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002437 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002438 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002439
Michael Mueller19114be2017-05-30 14:26:02 +02002440 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002441 kvm->arch.sie_page2 =
2442 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2443 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002444 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002445
Michael Mueller25c84db2019-01-31 09:52:41 +01002446 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002447 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002448
2449 for (i = 0; i < kvm_s390_fac_size(); i++) {
2450 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2451 (kvm_s390_fac_base[i] |
2452 kvm_s390_fac_ext[i]);
2453 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2454 kvm_s390_fac_base[i];
2455 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002456 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002457
David Hildenbrand19352222017-08-29 16:31:08 +02002458	/* we are always in czam mode - even on pre-z14 machines */
2459 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2460 set_kvm_facility(kvm->arch.model.fac_list, 138);
2461 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002462 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2463 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002464 if (MACHINE_HAS_TLB_GUEST) {
2465 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2466 set_kvm_facility(kvm->arch.model.fac_list, 147);
2467 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002468
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002469 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002470 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002471
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002472 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002473
Fei Li51978392017-02-17 17:06:26 +08002474 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002475 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002476 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2477 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002478 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002479 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002480
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002481 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002482 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002483
Carsten Ottee08b9632012-01-04 10:25:20 +01002484 if (type & KVM_VM_S390_UCONTROL) {
2485 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002486 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002487 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002488 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002489 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002490 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002491 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002492 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002493 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002494 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002495 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002496 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002497 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002498 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002499
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002500 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002501 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002502 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002503 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002504 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002505 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002506
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002507 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002508out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002509 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002510 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002511 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002512 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002513 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002514}
2515
Luiz Capitulino235539b2016-09-07 14:47:23 -04002516bool kvm_arch_has_vcpu_debugfs(void)
2517{
2518 return false;
2519}
2520
2521int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2522{
2523 return 0;
2524}
2525
Christian Borntraegerd329c032008-11-26 14:50:27 +01002526void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2527{
2528 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002529 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002530 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002531 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002532 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002533 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002534
2535 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002536 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002537
Dominik Dingele6db1d62015-05-07 15:41:57 +02002538 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002539 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002540 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002541
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002542 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002543 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002544}
2545
2546static void kvm_free_vcpus(struct kvm *kvm)
2547{
2548 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002549 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002550
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002551 kvm_for_each_vcpu(i, vcpu, kvm)
2552 kvm_arch_vcpu_destroy(vcpu);
2553
2554 mutex_lock(&kvm->lock);
2555 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2556 kvm->vcpus[i] = NULL;
2557
2558 atomic_set(&kvm->online_vcpus, 0);
2559 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002560}
2561
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002562void kvm_arch_destroy_vm(struct kvm *kvm)
2563{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002564 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002565 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002566 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002567 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002568 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002569 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002570 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002571 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002572 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002573 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002574 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002575}
2576
2577/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002578static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2579{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002580 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002581 if (!vcpu->arch.gmap)
2582 return -ENOMEM;
2583 vcpu->arch.gmap->private = vcpu->kvm;
2584
2585 return 0;
2586}
2587
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002588static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2589{
David Hildenbranda6940672016-08-08 22:39:32 +02002590 if (!kvm_s390_use_sca_entries())
2591 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002592 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002593 if (vcpu->kvm->arch.use_esca) {
2594 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002595
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002596 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002597 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002598 } else {
2599 struct bsca_block *sca = vcpu->kvm->arch.sca;
2600
2601 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002602 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002603 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002604 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002605}
2606
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002607static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002608{
David Hildenbranda6940672016-08-08 22:39:32 +02002609 if (!kvm_s390_use_sca_entries()) {
2610 struct bsca_block *sca = vcpu->kvm->arch.sca;
2611
2612 /* we still need the basic sca for the ipte control */
2613 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2614 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002615 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002616 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002617 read_lock(&vcpu->kvm->arch.sca_lock);
2618 if (vcpu->kvm->arch.use_esca) {
2619 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002620
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002621 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002622 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2623 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002624 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002625 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002626 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002627 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002628
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002629 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002630 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2631 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002632 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002633 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002634 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002635}
2636
2637/* Basic SCA to Extended SCA data copy routines */
2638static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2639{
2640 d->sda = s->sda;
2641 d->sigp_ctrl.c = s->sigp_ctrl.c;
2642 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2643}
2644
2645static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2646{
2647 int i;
2648
2649 d->ipte_control = s->ipte_control;
2650 d->mcn[0] = s->mcn;
2651 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2652 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2653}
2654
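/*
 * Replace the basic SCA by an extended SCA: allocate the new block, bring
 * all VCPUs out of SIE, copy the entries and repoint every SIE control
 * block under the write lock, then free the old basic SCA.
 */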
2655static int sca_switch_to_extended(struct kvm *kvm)
2656{
2657 struct bsca_block *old_sca = kvm->arch.sca;
2658 struct esca_block *new_sca;
2659 struct kvm_vcpu *vcpu;
2660 unsigned int vcpu_idx;
2661 u32 scaol, scaoh;
2662
2663 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2664 if (!new_sca)
2665 return -ENOMEM;
2666
2667 scaoh = (u32)((u64)(new_sca) >> 32);
2668 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2669
2670 kvm_s390_vcpu_block_all(kvm);
2671 write_lock(&kvm->arch.sca_lock);
2672
2673 sca_copy_b_to_e(new_sca, old_sca);
2674
2675 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2676 vcpu->arch.sie_block->scaoh = scaoh;
2677 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002678 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002679 }
2680 kvm->arch.sca = new_sca;
2681 kvm->arch.use_esca = 1;
2682
2683 write_unlock(&kvm->arch.sca_lock);
2684 kvm_s390_vcpu_unblock_all(kvm);
2685
2686 free_page((unsigned long)old_sca);
2687
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002688 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2689 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002690 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002691}
2692
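/*
 * Check whether a VCPU id still fits into the SCA; switch from the basic
 * to the extended SCA on demand when more slots are needed.
 */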
2693static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2694{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002695 int rc;
2696
David Hildenbranda6940672016-08-08 22:39:32 +02002697 if (!kvm_s390_use_sca_entries()) {
2698 if (id < KVM_MAX_VCPUS)
2699 return true;
2700 return false;
2701 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002702 if (id < KVM_S390_BSCA_CPU_SLOTS)
2703 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002704 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002705 return false;
2706
2707 mutex_lock(&kvm->lock);
2708 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2709 mutex_unlock(&kvm->lock);
2710
2711 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002712}
2713
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002714int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2715{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002716 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2717 kvm_clear_async_pf_completion_queue(vcpu);
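	/*
	 * Advertise the register sets that can be synced via kvm_run;
	 * the optional sets below are only offered when the matching
	 * facility is available to the guest.
	 */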
Christian Borntraeger59674c12012-01-11 11:20:33 +01002718 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2719 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002720 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002721 KVM_SYNC_CRS |
2722 KVM_SYNC_ARCH0 |
2723 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002724 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002725 if (test_kvm_facility(vcpu->kvm, 64))
2726 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002727 if (test_kvm_facility(vcpu->kvm, 82))
2728 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002729 if (test_kvm_facility(vcpu->kvm, 133))
2730 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002731 if (test_kvm_facility(vcpu->kvm, 156))
2732 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002733 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2734 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2735 */
2736 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002737 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002738 else
2739 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002740
2741 if (kvm_is_ucontrol(vcpu->kvm))
2742 return __kvm_ucontrol_vcpu_init(vcpu);
2743
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002744 return 0;
2745}
2746
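/*
 * CPU timer accounting: while a VCPU is loaded and running, the host TOD
 * time elapsed since cputm_start is lazily subtracted from the SIE CPU
 * timer; cputm_seqcount lets other threads read a consistent value.
 */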
David Hildenbranddb0758b2016-02-15 09:42:25 +01002747/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2748static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2749{
2750 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002751 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002752 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002753 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002754}
2755
2756/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2757static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2758{
2759 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002760 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002761 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2762 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002763 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002764}
2765
2766/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2767static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2768{
2769 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2770 vcpu->arch.cputm_enabled = true;
2771 __start_cpu_timer_accounting(vcpu);
2772}
2773
2774/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2775static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2776{
2777 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2778 __stop_cpu_timer_accounting(vcpu);
2779 vcpu->arch.cputm_enabled = false;
2780}
2781
2782static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2783{
2784 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2785 __enable_cpu_timer_accounting(vcpu);
2786 preempt_enable();
2787}
2788
2789static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2790{
2791 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2792 __disable_cpu_timer_accounting(vcpu);
2793 preempt_enable();
2794}
2795
David Hildenbrand4287f242016-02-15 09:40:12 +01002796/* set the cpu timer - may only be called from the VCPU thread itself */
2797void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2798{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002799 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002800 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002801 if (vcpu->arch.cputm_enabled)
2802 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002803 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002804 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002805 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002806}
2807
David Hildenbranddb0758b2016-02-15 09:42:25 +01002808/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002809__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2810{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002811 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002812 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002813
2814 if (unlikely(!vcpu->arch.cputm_enabled))
2815 return vcpu->arch.sie_block->cputm;
2816
David Hildenbrand9c23a132016-02-17 21:53:33 +01002817 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2818 do {
2819 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2820 /*
2821 * If the writer would ever execute a read in the critical
2822 * section, e.g. in irq context, we have a deadlock.
2823 */
2824 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2825 value = vcpu->arch.sie_block->cputm;
2826 /* if cputm_start is 0, accounting is being started/stopped */
2827 if (likely(vcpu->arch.cputm_start))
2828 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2829 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2830 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002831 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002832}
2833
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002834void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2835{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002836
David Hildenbrand37d9df92015-03-11 16:47:33 +01002837 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002838 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002839 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002840 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002841 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002842}
2843
2844void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2845{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002846 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002847 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002848 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002849 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002850 vcpu->arch.enabled_gmap = gmap_get_enabled();
2851 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002852
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002853}
2854
2855static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2856{
2857	/* this equals initial cpu reset in POP, but we don't switch to ESA */
2858 vcpu->arch.sie_block->gpsw.mask = 0UL;
2859 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002860 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002861 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002862 vcpu->arch.sie_block->ckc = 0UL;
2863 vcpu->arch.sie_block->todpr = 0;
2864 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002865 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2866 CR0_INTERRUPT_KEY_SUBMASK |
2867 CR0_MEASUREMENT_ALERT_SUBMASK;
2868 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2869 CR14_UNUSED_33 |
2870 CR14_EXTERNAL_DAMAGE_SUBMASK;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002871 /* make sure the new fpc will be lazily loaded */
2872 save_fpu_regs();
2873 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002874 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002875 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002876 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002877 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2878 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002879 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2880 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002881 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002882}
2883
Dominik Dingel31928aa2014-12-04 15:47:07 +01002884void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002885{
Jason J. Herne72f25022014-11-25 09:46:02 -05002886 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002887 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002888 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002889 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002890 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002891 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002892 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002893 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002894 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002895 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002896 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2897 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002898 /* make vcpu_load load the right gmap on the first trigger */
2899 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002900}
2901
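/*
 * A PCKMO subfunction is offered to the guest only if the configured CPU
 * model includes it and the host actually provides it.
 */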
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002902static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2903{
2904 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2905 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2906 return true;
2907 return false;
2908}
2909
2910static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2911{
2912 /* At least one ECC subfunction must be present */
2913 return kvm_has_pckmo_subfunc(kvm, 32) ||
2914 kvm_has_pckmo_subfunc(kvm, 33) ||
2915 kvm_has_pckmo_subfunc(kvm, 34) ||
2916 kvm_has_pckmo_subfunc(kvm, 40) ||
2917 kvm_has_pckmo_subfunc(kvm, 41);
2919}
2920
Tony Krowiak5102ee82014-06-27 14:46:01 -04002921static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2922{
Tony Krowiake585b242018-09-25 19:16:18 -04002923 /*
2924 * If the AP instructions are not being interpreted and the MSAX3
2925 * facility is not configured for the guest, there is nothing to set up.
2926 */
2927 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002928 return;
2929
Tony Krowiake585b242018-09-25 19:16:18 -04002930 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002931 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002932 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002933 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002934
Tony Krowiake585b242018-09-25 19:16:18 -04002935 if (vcpu->kvm->arch.crypto.apie)
2936 vcpu->arch.sie_block->eca |= ECA_APIE;
2937
2938 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002939 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002940 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002941		/* ecc is also wrapped with the AES key */
2942 if (kvm_has_pckmo_ecc(vcpu->kvm))
2943 vcpu->arch.sie_block->ecd |= ECD_ECC;
2944 }
2945
Tony Krowiaka374e892014-09-03 10:13:53 +02002946 if (vcpu->kvm->arch.crypto.dea_kw)
2947 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002948}
2949
Dominik Dingelb31605c2014-03-25 13:47:11 +01002950void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2951{
2952 free_page(vcpu->arch.sie_block->cbrlo);
2953 vcpu->arch.sie_block->cbrlo = 0;
2954}
2955
2956int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2957{
2958 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2959 if (!vcpu->arch.sie_block->cbrlo)
2960 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002961 return 0;
2962}
2963
Michael Mueller91520f12015-02-27 14:32:11 +01002964static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2965{
2966 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2967
Michael Mueller91520f12015-02-27 14:32:11 +01002968 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002969 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002970 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002971}
2972
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002973int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2974{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002975 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002976
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002977 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2978 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002979 CPUSTAT_STOPPED);
2980
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002981 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002982 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002983 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002984 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002985
Michael Mueller91520f12015-02-27 14:32:11 +01002986 kvm_s390_vcpu_setup_model(vcpu);
2987
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002988 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2989 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002990 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002991 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002992 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002993 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002994 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002995
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002996 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002997 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002998 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002999 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3000 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003001 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003002 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003003 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003004 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003005 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003006 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003007 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003008 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003009 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003010 vcpu->arch.sie_block->eca |= ECA_VX;
3011 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003012 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003013 if (test_kvm_facility(vcpu->kvm, 139))
3014 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003015 if (test_kvm_facility(vcpu->kvm, 156))
3016 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003017 if (vcpu->arch.sie_block->gd) {
3018 vcpu->arch.sie_block->eca |= ECA_AIV;
3019 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3020 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3021 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003022 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3023 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003024 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003025
3026 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003027 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003028 else
3029 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003030
Dominik Dingele6db1d62015-05-07 15:41:57 +02003031 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003032 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3033 if (rc)
3034 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003035 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003036 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003037 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003038
Collin Walling67d49d52018-08-31 12:51:19 -04003039 vcpu->arch.sie_block->hpid = HPID_KVM;
3040
Tony Krowiak5102ee82014-06-27 14:46:01 -04003041 kvm_s390_vcpu_crypto_setup(vcpu);
3042
Dominik Dingelb31605c2014-03-25 13:47:11 +01003043 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003044}
3045
3046struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3047 unsigned int id)
3048{
Carsten Otte4d475552011-10-18 12:27:12 +02003049 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003050 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02003051 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003052
David Hildenbrand42158252015-10-12 12:57:22 +02003053 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02003054 goto out;
3055
3056 rc = -ENOMEM;
3057
Michael Muellerb110fea2013-06-12 13:54:54 +02003058 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003059 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02003060 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003061
QingFeng Haoda72ca42017-06-07 11:41:19 +02003062 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003063 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3064 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003065 goto out_free_cpu;
3066
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003067 vcpu->arch.sie_block = &sie_page->sie_block;
3068 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3069
David Hildenbrandefed1102015-04-16 12:32:41 +02003070 /* the real guest size will always be smaller than msl */
3071 vcpu->arch.sie_block->mso = 0;
3072 vcpu->arch.sie_block->msl = sclp.hamax;
3073
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003074 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003075 spin_lock_init(&vcpu->arch.local_int.lock);
Michael Mueller982cff42019-01-31 09:52:38 +01003076 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003077 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3078 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003079 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003080
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003081 rc = kvm_vcpu_init(vcpu, kvm, id);
3082 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003083 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01003084 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003085 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02003086 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003087
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003088 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003089out_free_sie_block:
3090 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003091out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02003092 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02003093out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003094 return ERR_PTR(rc);
3095}
3096
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003097int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3098{
David Hildenbrand9a022062014-08-05 17:40:47 +02003099 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003100}
3101
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003102bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3103{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003104 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003105}
3106
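/*
 * PROG_BLOCK_SIE keeps a VCPU from (re)entering SIE until it is unblocked;
 * PROG_REQUEST flags a pending synchronous request. Both kick the VCPU
 * out of SIE via exit_sie().
 */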
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003107void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003108{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003109 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003110 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003111}
3112
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003113void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003114{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003115 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003116}
3117
Christian Borntraeger8e236542015-04-09 13:49:04 +02003118static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3119{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003120 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003121 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003122}
3123
David Hildenbrand9ea59722018-09-25 19:16:16 -04003124bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3125{
3126 return atomic_read(&vcpu->arch.sie_block->prog20) &
3127 (PROG_BLOCK_SIE | PROG_REQUEST);
3128}
3129
Christian Borntraeger8e236542015-04-09 13:49:04 +02003130static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3131{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003132 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003133}
3134
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003135/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003136 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003137 * If the CPU is not running (e.g. waiting as idle) the function will
3138 * return immediately.
 */
3139void exit_sie(struct kvm_vcpu *vcpu)
3140{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003141 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003142 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003143 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3144 cpu_relax();
3145}
3146
Christian Borntraeger8e236542015-04-09 13:49:04 +02003147/* Kick a guest cpu out of SIE to process a request synchronously */
3148void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003149{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003150 kvm_make_request(req, vcpu);
3151 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003152}
3153
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003154static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3155 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003156{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003157 struct kvm *kvm = gmap->private;
3158 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003159 unsigned long prefix;
3160 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003161
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003162 if (gmap_is_shadow(gmap))
3163 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003164 if (start >= 1UL << 31)
3165 /* We are only interested in prefix pages */
3166 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003167 kvm_for_each_vcpu(i, vcpu, kvm) {
3168 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003169 prefix = kvm_s390_get_prefix(vcpu);
3170 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3171 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3172 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003173 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003174 }
3175 }
3176}
3177
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003178bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3179{
3180 /* do not poll with more than halt_poll_max_steal percent of steal time */
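	/*
	 * avg_steal_timer is in TOD clock units (4096 per microsecond);
	 * TICK_USEC << 12 is one timer tick in the same units, so the
	 * quotient below is roughly the steal time in percent of a tick.
	 */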
3181 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3182 halt_poll_max_steal) {
3183 vcpu->stat.halt_no_poll_steal++;
3184 return true;
3185 }
3186 return false;
3187}
3188
Christoffer Dallb6d33832012-03-08 16:44:24 -05003189int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3190{
3191 /* kvm common code refers to this, but never calls it */
3192 BUG();
3193 return 0;
3194}
3195
Carsten Otte14eebd92012-05-15 14:15:26 +02003196static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3197 struct kvm_one_reg *reg)
3198{
3199 int r = -EINVAL;
3200
3201 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003202 case KVM_REG_S390_TODPR:
3203 r = put_user(vcpu->arch.sie_block->todpr,
3204 (u32 __user *)reg->addr);
3205 break;
3206 case KVM_REG_S390_EPOCHDIFF:
3207 r = put_user(vcpu->arch.sie_block->epoch,
3208 (u64 __user *)reg->addr);
3209 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003210 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003211 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003212 (u64 __user *)reg->addr);
3213 break;
3214 case KVM_REG_S390_CLOCK_COMP:
3215 r = put_user(vcpu->arch.sie_block->ckc,
3216 (u64 __user *)reg->addr);
3217 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003218 case KVM_REG_S390_PFTOKEN:
3219 r = put_user(vcpu->arch.pfault_token,
3220 (u64 __user *)reg->addr);
3221 break;
3222 case KVM_REG_S390_PFCOMPARE:
3223 r = put_user(vcpu->arch.pfault_compare,
3224 (u64 __user *)reg->addr);
3225 break;
3226 case KVM_REG_S390_PFSELECT:
3227 r = put_user(vcpu->arch.pfault_select,
3228 (u64 __user *)reg->addr);
3229 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003230 case KVM_REG_S390_PP:
3231 r = put_user(vcpu->arch.sie_block->pp,
3232 (u64 __user *)reg->addr);
3233 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003234 case KVM_REG_S390_GBEA:
3235 r = put_user(vcpu->arch.sie_block->gbea,
3236 (u64 __user *)reg->addr);
3237 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003238 default:
3239 break;
3240 }
3241
3242 return r;
3243}
3244
3245static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3246 struct kvm_one_reg *reg)
3247{
3248 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003249 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003250
3251 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003252 case KVM_REG_S390_TODPR:
3253 r = get_user(vcpu->arch.sie_block->todpr,
3254 (u32 __user *)reg->addr);
3255 break;
3256 case KVM_REG_S390_EPOCHDIFF:
3257 r = get_user(vcpu->arch.sie_block->epoch,
3258 (u64 __user *)reg->addr);
3259 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003260 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003261 r = get_user(val, (u64 __user *)reg->addr);
3262 if (!r)
3263 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003264 break;
3265 case KVM_REG_S390_CLOCK_COMP:
3266 r = get_user(vcpu->arch.sie_block->ckc,
3267 (u64 __user *)reg->addr);
3268 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003269 case KVM_REG_S390_PFTOKEN:
3270 r = get_user(vcpu->arch.pfault_token,
3271 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003272 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3273 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003274 break;
3275 case KVM_REG_S390_PFCOMPARE:
3276 r = get_user(vcpu->arch.pfault_compare,
3277 (u64 __user *)reg->addr);
3278 break;
3279 case KVM_REG_S390_PFSELECT:
3280 r = get_user(vcpu->arch.pfault_select,
3281 (u64 __user *)reg->addr);
3282 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003283 case KVM_REG_S390_PP:
3284 r = get_user(vcpu->arch.sie_block->pp,
3285 (u64 __user *)reg->addr);
3286 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003287 case KVM_REG_S390_GBEA:
3288 r = get_user(vcpu->arch.sie_block->gbea,
3289 (u64 __user *)reg->addr);
3290 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003291 default:
3292 break;
3293 }
3294
3295 return r;
3296}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003297
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003298static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3299{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003300 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003301 return 0;
3302}
3303
3304int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3305{
Christoffer Dall875656f2017-12-04 21:35:27 +01003306 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003307 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003308 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003309 return 0;
3310}
3311
3312int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3313{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003314 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003315 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003316 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003317 return 0;
3318}
3319
3320int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3321 struct kvm_sregs *sregs)
3322{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003323 vcpu_load(vcpu);
3324
Christian Borntraeger59674c12012-01-11 11:20:33 +01003325 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003326 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003327
3328 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003329 return 0;
3330}
3331
3332int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3333 struct kvm_sregs *sregs)
3334{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003335 vcpu_load(vcpu);
3336
Christian Borntraeger59674c12012-01-11 11:20:33 +01003337 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003338 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003339
3340 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003341 return 0;
3342}
3343
3344int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3345{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003346 int ret = 0;
3347
3348 vcpu_load(vcpu);
3349
3350 if (test_fp_ctl(fpu->fpc)) {
3351 ret = -EINVAL;
3352 goto out;
3353 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003354 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003355 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003356 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3357 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003358 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003359 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003360
3361out:
3362 vcpu_put(vcpu);
3363 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003364}
3365
3366int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3367{
Christoffer Dall13931232017-12-04 21:35:34 +01003368 vcpu_load(vcpu);
3369
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003370 /* make sure we have the latest values */
3371 save_fpu_regs();
3372 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003373 convert_vx_to_fp((freg_t *) fpu->fprs,
3374 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003375 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003376 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003377 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003378
3379 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003380 return 0;
3381}
3382
3383static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3384{
3385 int rc = 0;
3386
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003387 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003388 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003389 else {
3390 vcpu->run->psw_mask = psw.mask;
3391 vcpu->run->psw_addr = psw.addr;
3392 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003393 return rc;
3394}
3395
3396int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3397 struct kvm_translation *tr)
3398{
3399 return -EINVAL; /* not implemented yet */
3400}
3401
David Hildenbrand27291e22014-01-23 12:26:52 +01003402#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3403 KVM_GUESTDBG_USE_HW_BP | \
3404 KVM_GUESTDBG_ENABLE)
3405
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003406int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3407 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003408{
David Hildenbrand27291e22014-01-23 12:26:52 +01003409 int rc = 0;
3410
Christoffer Dall66b56562017-12-04 21:35:33 +01003411 vcpu_load(vcpu);
3412
David Hildenbrand27291e22014-01-23 12:26:52 +01003413 vcpu->guest_debug = 0;
3414 kvm_s390_clear_bp_data(vcpu);
3415
Christoffer Dall66b56562017-12-04 21:35:33 +01003416 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3417 rc = -EINVAL;
3418 goto out;
3419 }
3420 if (!sclp.has_gpere) {
3421 rc = -EINVAL;
3422 goto out;
3423 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003424
3425 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3426 vcpu->guest_debug = dbg->control;
3427 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003428 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003429
3430 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3431 rc = kvm_s390_import_bp_data(vcpu, dbg);
3432 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003433 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003434 vcpu->arch.guestdbg.last_bp = 0;
3435 }
3436
3437 if (rc) {
3438 vcpu->guest_debug = 0;
3439 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003440 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003441 }
3442
Christoffer Dall66b56562017-12-04 21:35:33 +01003443out:
3444 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003445 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003446}
3447
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003448int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3449 struct kvm_mp_state *mp_state)
3450{
Christoffer Dallfd232562017-12-04 21:35:30 +01003451 int ret;
3452
3453 vcpu_load(vcpu);
3454
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003455 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003456 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3457 KVM_MP_STATE_OPERATING;
3458
3459 vcpu_put(vcpu);
3460 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003461}
3462
3463int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3464 struct kvm_mp_state *mp_state)
3465{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003466 int rc = 0;
3467
Christoffer Dalle83dff52017-12-04 21:35:31 +01003468 vcpu_load(vcpu);
3469
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003470 /* user space knows about this interface - let it control the state */
3471 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3472
3473 switch (mp_state->mp_state) {
3474 case KVM_MP_STATE_STOPPED:
3475 kvm_s390_vcpu_stop(vcpu);
3476 break;
3477 case KVM_MP_STATE_OPERATING:
3478 kvm_s390_vcpu_start(vcpu);
3479 break;
3480 case KVM_MP_STATE_LOAD:
3481 case KVM_MP_STATE_CHECK_STOP:
3482 /* fall through - CHECK_STOP and LOAD are not supported yet */
3483 default:
3484 rc = -ENXIO;
3485 }
3486
Christoffer Dalle83dff52017-12-04 21:35:31 +01003487 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003488 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003489}
3490
David Hildenbrand8ad35752014-03-14 11:00:21 +01003491static bool ibs_enabled(struct kvm_vcpu *vcpu)
3492{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003493 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003494}
3495
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003496static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3497{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003498retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003499 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003500 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003501 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003502 /*
3503 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003504 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003505 * This ensures that the ipte instruction for this request has
3506 * already finished. We might race against a second unmapper that
3507 * wants to set the blocking bit. Let's just retry the request loop.
3508 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003509 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003510 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003511 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3512 kvm_s390_get_prefix(vcpu),
3513 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003514 if (rc) {
3515 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003516 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003517 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003518 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003519 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003520
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003521 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3522 vcpu->arch.sie_block->ihcpu = 0xffff;
3523 goto retry;
3524 }
3525
David Hildenbrand8ad35752014-03-14 11:00:21 +01003526 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3527 if (!ibs_enabled(vcpu)) {
3528 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003529 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003530 }
3531 goto retry;
3532 }
3533
3534 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3535 if (ibs_enabled(vcpu)) {
3536 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003537 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003538 }
3539 goto retry;
3540 }
3541
David Hildenbrand6502a342016-06-21 14:19:51 +02003542 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3543 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3544 goto retry;
3545 }
3546
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003547 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3548 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003549 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003550	 * instruction manually, in order to provide the additional
3551	 * functionality needed for live migration.
3552 */
3553 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3554 goto retry;
3555 }
3556
3557 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3558 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003559 * Re-enable CMM virtualization if CMMA is available and
3560 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003561 */
3562 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003563 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003564 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3565 goto retry;
3566 }
3567
David Hildenbrand0759d062014-05-13 16:54:32 +02003568 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003569 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003570 /* we left the vsie handler, nothing to do, just clear the request */
3571 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003572
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003573 return 0;
3574}
3575
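/*
 * The guest TOD is kept as an offset to the host TOD: epoch = guest TOD -
 * host TOD, extended by an epoch index when the multiple-epoch facility
 * (139) is available. If the epoch subtraction wraps (epoch > gtod->tod),
 * one is borrowed from the epoch index below.
 */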
void kvm_s390_set_tod_clock(struct kvm *kvm,
                            const struct kvm_s390_vm_tod_clock *gtod)
{
        struct kvm_vcpu *vcpu;
        struct kvm_s390_tod_clock_ext htod;
        int i;

        mutex_lock(&kvm->lock);
        preempt_disable();

        get_tod_clock_ext((char *)&htod);

        kvm->arch.epoch = gtod->tod - htod.tod;
        kvm->arch.epdx = 0;
        if (test_kvm_facility(kvm, 139)) {
                kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
                if (kvm->arch.epoch > gtod->tod)
                        kvm->arch.epdx -= 1;
        }

        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu->arch.sie_block->epoch = kvm->arch.epoch;
                vcpu->arch.sie_block->epdx = kvm->arch.epdx;
        }

        kvm_s390_vcpu_unblock_all(kvm);
        preempt_enable();
        mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        return gmap_fault(vcpu->arch.gmap, gpa,
                          writable ? FAULT_FLAG_WRITE : 0);
}

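/*
 * Pfault handshake: the "init" notification is injected as a local
 * interrupt into the faulting vcpu, while the "done" notification is
 * injected as a floating interrupt, so any vcpu of the VM may see it.
 */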
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        struct kvm_s390_irq irq;

        if (start_token) {
                irq.u.ext.ext_params2 = token;
                irq.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly, but we still want
         * check_async_completion to clean up.
         */
        return true;
}

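/*
 * Only hand a fault to the async pfault machinery if the guest opted in
 * (a valid pfault token is set), the PSW mask matches the guest's pfault
 * select/compare values, and external interrupts plus the service-signal
 * subclass are enabled; otherwise the caller falls back to a synchronous
 * fault-in.
 */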
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

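/*
 * Work done on every loop iteration before entering SIE: complete pending
 * pfault housekeeping, reschedule and handle machine checks if necessary,
 * deliver pending interrupts and process outstanding VCPU requests.
 */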
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the housekeeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
        vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_pgm_info pgm_info = {
                .code = PGM_ADDRESSING,
        };
        u8 opcode, ilen;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
        trace_kvm_s390_sie_fault(vcpu);

        /*
         * We want to inject an addressing exception, which is defined as a
         * suppressing or terminating exception. However, since we came here
         * by a DAT access exception, the PSW still points to the faulting
         * instruction since DAT exceptions are nullifying. So we've got
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
        rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
        ilen = insn_length(opcode);
        if (rc < 0) {
                return rc;
        } else if (rc) {
                /* Instruction-Fetching Exceptions - we can't detect the ilen.
                 * Forward by arbitrary ilc, injection will take care of
                 * nullification if necessary.
                 */
                pgm_info = vcpu->arch.pgm;
                ilen = 4;
        }
        pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
        kvm_s390_forward_psw(vcpu, ilen);
        return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

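/*
 * Triage a SIE exit: -EINTR means a host machine check hit the guest and
 * is reinjected; a positive icptcode is fed to the intercept handlers;
 * -EFAULT is resolved by (async) fault-in, forwarded to userspace for
 * ucontrol VMs, or turned into an addressing exception for the guest.
 */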
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        struct mcck_volatile_info *mcck_info;
        struct sie_page *sie_page;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
        vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

        if (exit_reason == -EINTR) {
                VCPU_EVENT(vcpu, 3, "%s", "machine check");
                sie_page = container_of(vcpu->arch.sie_block,
                                        struct sie_page, sie_block);
                mcck_info = &sie_page->mcck_info;
                kvm_s390_reinject_machine_check(vcpu, mcck_info);
                return 0;
        }

        if (vcpu->arch.sie_block->icptcode > 0) {
                int rc = kvm_handle_sie_intercept(vcpu);

                if (rc != -EOPNOTSUPP)
                        return rc;
                vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
                vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                return -EREMOTE;
        } else if (exit_reason != -EFAULT) {
                vcpu->stat.exit_null++;
                return 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                return -EREMOTE;
        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu))
                        return 0;
                return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
        }
        return vcpu_post_run_fault_in_sie(vcpu);
}

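/*
 * Inner run loop: alternate vcpu_pre_run() -> sie64a() -> vcpu_post_run()
 * until a signal arrives, guest debugging requests an exit, or a handler
 * reports an error. SRCU is dropped around sie64a() and interrupts stay
 * disabled across guest_enter/guest_exit so that run time is accounted
 * to the guest.
 */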
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected.
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * should be no uaccess between guest_enter and guest_exit.
                 */
                local_irq_disable();
                guest_enter_irqoff();
                __disable_cpu_timer_accounting(vcpu);
                local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                local_irq_disable();
                __enable_cpu_timer_accounting(vcpu);
                guest_exit_irqoff();
                local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

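/*
 * Push the register state that userspace marked dirty in kvm_run into the
 * SIE block and the host thread. Lazily enabled facilities (RI, GS) are
 * switched on eagerly here if userspace handed in valid control blocks
 * for them, e.g. after migration.
 */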
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct runtime_instr_cb *riccb;
        struct gs_cb *gscb;

        riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
        gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                /* some control register changes require a tlb flush */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
                kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
        }
        /*
         * If userspace sets the riccb (e.g. after migration) to a valid state,
         * we should enable RI here instead of doing the lazy enablement.
         */
        if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
            test_kvm_facility(vcpu->kvm, 64) &&
            riccb->v &&
            !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
                VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
                vcpu->arch.sie_block->ecb3 |= ECB3_RI;
        }
        /*
         * If userspace sets the gscb (e.g. after migration) to non-zero,
         * we should enable GS here instead of doing the lazy enablement.
         */
        if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
            test_kvm_facility(vcpu->kvm, 133) &&
            gscb->gssm &&
            !vcpu->arch.gs_enabled) {
                VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
                vcpu->arch.sie_block->ecb |= ECB_GS;
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
                vcpu->arch.gs_enabled = 1;
        }
        if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
            test_kvm_facility(vcpu->kvm, 82)) {
                vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
                vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
        }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        /* save host (userspace) fprs/vrs */
        save_fpu_regs();
        vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
        vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
        if (MACHINE_HAS_VX)
                current->thread.fpu.regs = vcpu->run->s.regs.vrs;
        else
                current->thread.fpu.regs = vcpu->run->s.regs.fprs;
        current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;
        if (MACHINE_HAS_GS) {
                preempt_disable();
                __ctl_set_bit(2, 4);
                if (current->thread.gs_cb) {
                        vcpu->arch.host_gscb = current->thread.gs_cb;
                        save_gs_cb(vcpu->arch.host_gscb);
                }
                if (vcpu->arch.gs_enabled) {
                        current->thread.gs_cb = (struct gs_cb *)
                                                &vcpu->run->s.regs.gscb;
                        restore_gs_cb(current->thread.gs_cb);
                }
                preempt_enable();
        }
        /* SIE will load etoken directly from SDNX and therefore kvm_run */

        kvm_run->kvm_dirty_regs = 0;
}

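/*
 * Counterpart of sync_regs(): copy the guest state back into kvm_run and
 * restore the host's access registers, FPU/vector state and
 * guarded-storage control block.
 */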
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
        kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
        kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
        /* Save guest register state */
        save_fpu_regs();
        vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        /* Restore will be done lazily at return */
        current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
        current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
        if (MACHINE_HAS_GS) {
                __ctl_set_bit(2, 4);
                if (vcpu->arch.gs_enabled)
                        save_gs_cb(current->thread.gs_cb);
                preempt_disable();
                current->thread.gs_cb = vcpu->arch.host_gscb;
                restore_gs_cb(vcpu->arch.host_gscb);
                preempt_enable();
                if (!vcpu->arch.host_gscb)
                        __ctl_clear_bit(2, 4);
                vcpu->arch.host_gscb = NULL;
        }
        /* SIE will save etoken directly into SDNX and therefore kvm_run */
}

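/*
 * KVM_RUN: sync the register state provided by userspace into the SIE
 * block, execute the inner run loop, and mirror the resulting state back
 * into kvm_run before returning to userspace.
 */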
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;

        if (kvm_run->immediate_exit)
                return -EINTR;

        vcpu_load(vcpu);

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
                goto out;
        }

        kvm_sigset_activate(vcpu);

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
        } else if (is_vcpu_stopped(vcpu)) {
                pr_err_ratelimited("can't run stopped vcpu %d\n",
                                   vcpu->vcpu_id);
                rc = -EINVAL;
                goto out;
        }

        sync_regs(vcpu, kvm_run);
        enable_cpu_timer_accounting(vcpu);

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* userspace support is needed, kvm_run has been prepared */
                rc = 0;
        }

        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);

        kvm_sigset_deactivate(vcpu);

        vcpu->stat.exit_userspace++;
out:
        vcpu_put(vcpu);
        return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        freg_t fprs[NUM_FPRS];
        unsigned int px;
        u64 clkcomp, cputm;
        int rc;

        px = kvm_s390_get_prefix(vcpu);
        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = 0;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = px;
        } else
                gpa -= __LC_FPREGS_SAVE_AREA;

        /* manually convert vector registers if necessary */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
                                     fprs, 128);
        } else {
                rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
                              &vcpu->arch.sie_block->gpsw, 16);
        rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
                              &px, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
                              &vcpu->run->s.regs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
        cputm = kvm_s390_get_cpu_timer(vcpu);
        rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
                              &cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * switch in the run ioctl. Let's update our copies before we save
         * them into the save area.
         */
        save_fpu_regs();
        vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                __disable_ibs_on_vcpu(vcpu);
        }
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        if (!sclp.has_ibs)
                return;
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
        kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

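/*
 * IBS may only stay enabled while at most one vcpu is running: starting
 * the first vcpu enables it as a performance optimization, starting a
 * second one disables it on all vcpus, and stopping the second-to-last
 * vcpu re-enables it for the survivor (see kvm_s390_vcpu_stop()).
 */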
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;

        if (!is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
                        started_vcpus++;
        }

        if (started_vcpus == 0) {
                /* we're the only active VCPU -> speed it up */
                __enable_ibs_on_vcpu(vcpu);
        } else if (started_vcpus == 1) {
                /*
                 * As we are starting a second VCPU, we have to disable
                 * the IBS facility on all VCPUs to remove potentially
                 * outstanding ENABLE requests.
                 */
                __disable_ibs_on_all_vcpus(vcpu->kvm);
        }

        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
        /*
         * Another VCPU might have used IBS while we were offline.
         * Let's play safe and flush the VCPU at startup.
         */
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;
        struct kvm_vcpu *started_vcpu = NULL;

        if (is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
        kvm_s390_clear_stop_irq(vcpu);

        kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
        __disable_ibs_on_vcpu(vcpu);

        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
                        started_vcpus++;
                        started_vcpu = vcpu->kvm->vcpus[i];
                }
        }

        if (started_vcpus == 1) {
                /*
                 * As we only have one VCPU left, we want to enable the
                 * IBS facility for that VCPU to speed it up.
                 */
                __enable_ibs_on_vcpu(started_vcpu);
        }

        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

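/*
 * Backend for the KVM_S390_MEM_OP vcpu ioctl: reads or writes guest
 * logical memory through a bounce buffer, or only checks accessibility
 * when KVM_S390_MEMOP_F_CHECK_ONLY is set (no buffer is allocated then).
 * A hypothetical userspace caller would fill struct kvm_s390_mem_op with
 * op = KVM_S390_MEMOP_LOGICAL_READ plus gaddr, size and buf, and issue
 * ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop).
 */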
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
                                  struct kvm_s390_mem_op *mop)
{
        void __user *uaddr = (void __user *)mop->buf;
        void *tmpbuf = NULL;
        int r, srcu_idx;
        const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
                                    | KVM_S390_MEMOP_F_CHECK_ONLY;

        if (mop->flags & ~supported_flags)
                return -EINVAL;

        if (mop->size > MEM_OP_MAX_SIZE)
                return -E2BIG;

        if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
                tmpbuf = vmalloc(mop->size);
                if (!tmpbuf)
                        return -ENOMEM;
        }

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        switch (mop->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
                        r = check_gva_range(vcpu, mop->gaddr, mop->ar,
                                            mop->size, GACC_FETCH);
                        break;
                }
                r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
                if (r == 0) {
                        if (copy_to_user(uaddr, tmpbuf, mop->size))
                                r = -EFAULT;
                }
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
                        r = check_gva_range(vcpu, mop->gaddr, mop->ar,
                                            mop->size, GACC_STORE);
                        break;
                }
                if (copy_from_user(tmpbuf, uaddr, mop->size)) {
                        r = -EFAULT;
                        break;
                }
                r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
                break;
        default:
                r = -EINVAL;
        }

        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

        if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
                kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

        vfree(tmpbuf);
        return r;
}

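/*
 * Interrupt injection is handled here asynchronously, i.e. without the
 * vcpu mutex held; all other vcpu ioctls fall through to
 * kvm_arch_vcpu_ioctl() below, which takes the lock via vcpu_load().
 */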
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_IRQ: {
                struct kvm_s390_irq s390irq;

                if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390irq);
        }
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
                struct kvm_s390_irq s390irq;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                if (s390int_to_s390irq(&s390int, &s390irq))
                        return -EINVAL;
                return kvm_s390_inject_vcpu(vcpu, &s390irq);
        }
        }
        return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        vcpu_load(vcpu);

        switch (ioctl) {
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(vcpu->arch.gmap, arg, 0);
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_S390_MEM_OP: {
                struct kvm_s390_mem_op mem_op;

                if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
                        r = kvm_s390_guest_mem_op(vcpu, &mem_op);
                else
                        r = -EFAULT;
                break;
        }
        case KVM_S390_SET_IRQ_STATE: {
                struct kvm_s390_irq_state irq_state;

                r = -EFAULT;
                if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
                        break;
                if (irq_state.len > VCPU_IRQS_MAX_BUF ||
                    irq_state.len == 0 ||
                    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
                        r = -EINVAL;
                        break;
                }
                /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_set_irq_state(vcpu,
                                           (void __user *) irq_state.buf,
                                           irq_state.len);
                break;
        }
        case KVM_S390_GET_IRQ_STATE: {
                struct kvm_s390_irq_state irq_state;

                r = -EFAULT;
                if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
                        break;
                if (irq_state.len == 0) {
                        r = -EINVAL;
                        break;
                }
                /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_get_irq_state(vcpu,
                                           (__u8 __user *) irq_state.buf,
                                           irq_state.len);
                break;
        }
        default:
                r = -ENOTTY;
        }

        vcpu_put(vcpu);
        return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
            && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. We can have memory slots which have to start
           and end at a segment boundary (1MB). The memory in userland may be
           fragmented into various different vmas. It is okay to mmap() and
           munmap() stuff in this slot after doing this call at any time. */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        int rc = 0;

        switch (change) {
        case KVM_MR_DELETE:
                rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
                                        old->npages * PAGE_SIZE);
                break;
        case KVM_MR_MOVE:
                rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
                                        old->npages * PAGE_SIZE);
                if (rc)
                        break;
                /* FALLTHROUGH */
        case KVM_MR_CREATE:
                rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                                      mem->guest_phys_addr, mem->memory_size);
                break;
        case KVM_MR_FLAGS_ONLY:
                break;
        default:
                WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
        }
        if (rc)
                pr_warn("failed to commit memory region\n");
        return;
}

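/*
 * Mask of the facility bits in STFLE doubleword i that are not handled by
 * the hypervisor itself. This reading is inferred from the arithmetic
 * below: each two-bit hmfai field selects how many leading 16-bit blocks
 * of the doubleword to hide, and the returned mask keeps the rest.
 */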
static inline unsigned long nonhyp_mask(int i)
{
        unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

        return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
        vcpu->valid_wakeup = false;
}

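/*
 * Module init: refuse to load without the SIE facility, reject the
 * unsupported nested+hugepage combination and reduce the advertised
 * facility list to the bits that can be passed through to guests.
 */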
static int __init kvm_s390_init(void)
{
        int i;

        if (!sclp.has_sief2) {
                pr_info("SIE is not available\n");
                return -ENODEV;
        }

        if (nested && hpage) {
                pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
                return -EINVAL;
        }

        for (i = 0; i < 16; i++)
                kvm_s390_fac_base[i] |=
                        S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

        return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");