// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

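/*
 * Counters exported by the generic KVM code under /sys/kernel/debug/kvm/;
 * each entry maps a counter name to an offset in the per-vcpu or per-vm
 * stat structure via the VCPU_STAT/VM_STAT helpers above.
 */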
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

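/*
 * 16-byte layout as stored by STORE CLOCK EXTENDED; filled in by
 * get_tod_clock_ext() below.
 */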
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
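
/*
 * Example host usage (illustrative shell commands, not part of this file):
 *   modprobe kvm nested=1
 *   echo 30 > /sys/module/kvm/parameters/halt_poll_max_steal
 * nested and hpage are read-only after load (S_IRUGO/0444), while
 * halt_poll_max_steal (0644) can be changed at runtime.
 */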

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it requires code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

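/*
 * Apply a host TOD-clock delta to one SIE control block: adjust the guest
 * epoch (and, with the multiple-epoch facility, the epoch index) so that
 * the guest-observed TOD clock is unchanged.
 */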
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta; we have to compensate by adding -delta
	 * to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

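/*
 * Probe which optional CPU features and instruction subfunctions (PLO,
 * PTFF, the CPACF crypto functions, SORTL, DFLTCC) the host provides, and
 * record them so they can later be offered to guests via the cpu model.
 */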
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

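/*
 * Collect the dirty bits of all guest pages in a memslot: for each segment
 * of _PAGE_ENTRIES pages, fetch the per-page dirty bits from the gmap and
 * propagate them into the KVM dirty bitmap via mark_page_dirty().
 */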
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

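/*
 * Request interception of operation exceptions on all vcpus, so that user
 * space gets a chance to handle instruction 0x0000 itself
 * (KVM_CAP_S390_USER_INSTR0).
 */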
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

David Hildenbrand33d1b272018-04-27 14:36:13 +02001164static void kvm_s390_get_tod_clock(struct kvm *kvm,
1165 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001166{
1167 struct kvm_s390_tod_clock_ext htod;
1168
1169 preempt_disable();
1170
1171 get_tod_clock_ext((char *)&htod);
1172
1173 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001174 gtod->epoch_idx = 0;
1175 if (test_kvm_facility(kvm, 139)) {
1176 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1177 if (gtod->tod < htod.tod)
1178 gtod->epoch_idx += 1;
1179 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001180
1181 preempt_enable();
1182}
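
/*
 * Worked example (a sketch, not from the original source): the guest TOD
 * is the host TOD plus a per-VM epoch offset, and since both are 64-bit
 * values the addition above can wrap. With facility 139 (multiple-epoch)
 * the carry is folded into epoch_idx: if htod.tod == 0xffff000000000000
 * and kvm->arch.epoch == 0x0002000000000000, gtod->tod wraps around to
 * 0x0001000000000000, which is below htod.tod, so epoch_idx is bumped
 * by one to preserve the full multi-epoch timestamp.
 */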

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
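
/*
 * Editorial note: bitmap_subset() above guarantees that userspace can only
 * request CPU features the host actually offers; any bit outside
 * kvm_s390_available_cpu_feat fails the call with -EINVAL. A VMM that
 * simply echoes back the result of KVM_S390_VM_CPU_MACHINE_FEAT is
 * therefore always accepted, since that query returns exactly the
 * available-feature bitmap.
 */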

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
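
/*
 * Editorial note (an inference from the surrounding code, not a statement
 * by the original authors): fac_mask is the set of facilities KVM is
 * prepared to offer to guests, while mach->fac_list above is the raw host
 * facility list taken from the stfle copy in the lowcore; userspace
 * derives the guest-visible facility set from the combination of the two.
 */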

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
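
/*
 * Background sketch (editorial; the Principles of Operation is the
 * authoritative reference): each storage key byte carries the four
 * access-control bits (ACC), the fetch-protection bit (F), the reference
 * bit (R) and the change bit (C). Bit 7 (mask 0x01) is not defined, which
 * is why kvm_s390_set_skeys above rejects any key value with the lowest
 * order bit set.
 */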

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
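
/*
 * Worked example (editorial): on a 64-bit build KVM_S390_MAX_BIT_DISTANCE
 * is 16. If two dirty pages lie 10 clean pages apart, starting a new block
 * would cost a fresh 16-byte (base, length) header, while padding with 10
 * one-byte clean values costs only 10 bytes, so the gap is bridged within
 * the current block; a gap wider than 16 pages instead ends the block.
 */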

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}
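
/*
 * Editorial note on the binary search above: the memslots array is kept
 * sorted by base_gfn in descending order, so "gfn >= base_gfn" moves the
 * upper bound down. The loop converges on the first slot whose base_gfn
 * is at or below gfn, which is either the containing slot or the slot
 * bordering the hole from below - exactly the "approx" behaviour the
 * comment promises. The lru_slot cache is only refreshed on an exact hit.
 */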

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}
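
/*
 * Editorial note (derived from the s390 PGSTE bit definitions): after the
 * ">> 24" shift, the 0x43 mask keeps the two usage-state bits (stable,
 * unused, potentially volatile, volatile) plus the NODAT bit, i.e. exactly
 * the per-page CMMA state that has to travel to the migration target; all
 * other PGSTE bits are irrelevant here and are masked off.
 */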

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
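
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * during migration a VMM drains the dirty CMMA log roughly like
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = bufsize,
 *		.flags = 0,
 *		.values = (unsigned long)buf,
 *	};
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		... transmit log.count values starting at log.start_gfn ...
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 *
 * where buf, bufsize and vm_fd are the caller's buffer, its size and the
 * VM file descriptor; error handling is omitted for brevity.
 */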

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
	struct kvm_vcpu *vcpu;
	u16 rc, rrc;
	int ret = 0;
	int i;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
			*rcp = rc;
			*rrcp = rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}

static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int i, r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}

static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	int r = 0;
	u16 dummy;
	void __user *argp = (void __user *)cmd->data;

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca
		 * from esca, we need no cleanup in the error cases below.
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		down_write(&current->mm->mmap_sem);
		r = gmap_mark_unmergeable();
		up_write(&current->mm->mmap_sem);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
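
/*
 * Lifecycle sketch (editorial summary of the handler above): userspace
 * first converts the VM with KVM_PV_ENABLE, which switches to the extended
 * SCA, makes the address space unmergeable, creates the protected VM and
 * converts all vCPUs. It then pushes the encrypted image header with
 * KVM_PV_SET_SEC_PARMS, unpacks the image with one or more KVM_PV_UNPACK
 * calls, and runs KVM_PV_VERIFY before starting the guest. KVM_PV_DISABLE
 * tears the protected configuration down again.
 */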

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		mutex_lock(&kvm->lock);
		r = kvm_s390_handle_pv(kvm, &args);
		mutex_unlock(&kvm->lock);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
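
/*
 * Worked example (editorial, assuming the CRYCB is aligned well enough
 * that its low-order address bits are zero, which the allocation above
 * provides): address and format share one designation word. If the CRYCB
 * sits at 0x12345800 and both MSAX3 and APXA are available, crycbd becomes
 * 0x12345800 | CRYCB_FORMAT2, so the hardware receives the block address
 * and its layout in a single 32-bit value.
 */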
2502
Pierre Morel0e237e42018-10-05 10:31:09 +02002503void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2504 unsigned long *aqm, unsigned long *adm)
2505{
2506 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2507
2508 mutex_lock(&kvm->lock);
2509 kvm_s390_vcpu_block_all(kvm);
2510
2511 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2512 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
2513 memcpy(crycb->apcb1.apm, apm, 32);
2514 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2515 apm[0], apm[1], apm[2], apm[3]);
2516 memcpy(crycb->apcb1.aqm, aqm, 32);
2517 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2518 aqm[0], aqm[1], aqm[2], aqm[3]);
2519 memcpy(crycb->apcb1.adm, adm, 32);
2520 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2521 adm[0], adm[1], adm[2], adm[3]);
2522 break;
2523 case CRYCB_FORMAT1:
2524 case CRYCB_FORMAT0: /* Fall through both use APCB0 */
2525 memcpy(crycb->apcb0.apm, apm, 8);
2526 memcpy(crycb->apcb0.aqm, aqm, 2);
2527 memcpy(crycb->apcb0.adm, adm, 2);
2528 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2529 apm[0], *((unsigned short *)aqm),
2530 *((unsigned short *)adm));
2531 break;
 2532 default: /* Cannot happen */
2533 break;
2534 }
2535
2536 /* recreate the shadow crycb for each vcpu */
2537 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2538 kvm_s390_vcpu_unblock_all(kvm);
2539 mutex_unlock(&kvm->lock);
2540}
2541EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
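/*
 * A minimal usage sketch: the in-tree caller is the vfio_ap driver, but
 * grant_ap_resources() below is a hypothetical helper. The masks are
 * MSB-first bitmaps (bit 0 is the leftmost bit), hence set_bit_inv().
 *
 *	static void grant_ap_resources(struct kvm *kvm)
 *	{
 *		DECLARE_BITMAP(apm, 256) = { 0 };
 *		DECLARE_BITMAP(aqm, 256) = { 0 };
 *		DECLARE_BITMAP(adm, 256) = { 0 };
 *		int i;
 *
 *		set_bit_inv(3, apm);		// pass adapter (card) 3 through
 *		for (i = 0; i < 16; i++)
 *			set_bit_inv(i, aqm);	// usage domains 0-15
 *		set_bit_inv(0, adm);		// control domain 0
 *		kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	}
 */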
2542
Tony Krowiak421045982018-09-25 19:16:25 -04002543void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2544{
2545 mutex_lock(&kvm->lock);
2546 kvm_s390_vcpu_block_all(kvm);
2547
2548 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2549 sizeof(kvm->arch.crypto.crycb->apcb0));
2550 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2551 sizeof(kvm->arch.crypto.crycb->apcb1));
2552
Pierre Morel0e237e42018-10-05 10:31:09 +02002553 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002554 /* recreate the shadow crycb for each vcpu */
2555 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002556 kvm_s390_vcpu_unblock_all(kvm);
2557 mutex_unlock(&kvm->lock);
2558}
2559EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2560
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002561static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002562{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002563 struct cpuid cpuid;
2564
2565 get_cpu_id(&cpuid);
2566 cpuid.version = 0xff;
2567 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002568}
2569
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002570static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002571{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002572 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002573 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002574
Tony Krowiake585b242018-09-25 19:16:18 -04002575 if (!test_kvm_facility(kvm, 76))
2576 return;
2577
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002578 /* Enable AES/DEA protected key functions by default */
2579 kvm->arch.crypto.aes_kw = 1;
2580 kvm->arch.crypto.dea_kw = 1;
2581 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2582 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2583 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2584 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002585}
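/*
 * Sketch (userspace, assuming a vm_fd and facility 76): the aes_kw/dea_kw
 * defaults set above can later be toggled through the KVM_S390_VM_CRYPTO
 * attribute group, e.g. to disable AES key wrapping:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */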
2586
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002587static void sca_dispose(struct kvm *kvm)
2588{
2589 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002590 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002591 else
2592 free_page((unsigned long)(kvm->arch.sca));
2593 kvm->arch.sca = NULL;
2594}
2595
Carsten Ottee08b9632012-01-04 10:25:20 +01002596int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002597{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002598 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002599 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002600 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002601 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002602
Carsten Ottee08b9632012-01-04 10:25:20 +01002603 rc = -EINVAL;
2604#ifdef CONFIG_KVM_S390_UCONTROL
2605 if (type & ~KVM_VM_S390_UCONTROL)
2606 goto out_err;
2607 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2608 goto out_err;
2609#else
2610 if (type)
2611 goto out_err;
2612#endif
2613
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002614 rc = s390_enable_sie();
2615 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002616 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002617
Carsten Otteb2904112011-10-18 12:27:13 +02002618 rc = -ENOMEM;
2619
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002620 if (!sclp.has_64bscao)
2621 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002622 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002623 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002624 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002625 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002626 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002627 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002628 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002629 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002630 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002631 kvm->arch.sca = (struct bsca_block *)
2632 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002633 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002634
2635 sprintf(debug_name, "kvm-%u", current->pid);
2636
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002637 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002638 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002639 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002640
Michael Mueller19114be2017-05-30 14:26:02 +02002641 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002642 kvm->arch.sie_page2 =
2643 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2644 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002645 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002646
Michael Mueller25c84db2019-01-31 09:52:41 +01002647 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002648 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002649
2650 for (i = 0; i < kvm_s390_fac_size(); i++) {
2651 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2652 (kvm_s390_fac_base[i] |
2653 kvm_s390_fac_ext[i]);
2654 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2655 kvm_s390_fac_base[i];
2656 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002657 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002658
David Hildenbrand19352222017-08-29 16:31:08 +02002659 /* we are always in czam mode - even on pre-z14 machines */
2660 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2661 set_kvm_facility(kvm->arch.model.fac_list, 138);
2662 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002663 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2664 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002665 if (MACHINE_HAS_TLB_GUEST) {
2666 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2667 set_kvm_facility(kvm->arch.model.fac_list, 147);
2668 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002669
Pierre Morel05f31e32019-05-21 17:34:37 +02002670 if (css_general_characteristics.aiv && test_facility(65))
2671 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2672
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002673 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002674 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002675
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002676 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002677
Fei Li51978392017-02-17 17:06:26 +08002678 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002679 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002680 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2681 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002682 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002683 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002684
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002685 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002686 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002687
Carsten Ottee08b9632012-01-04 10:25:20 +01002688 if (type & KVM_VM_S390_UCONTROL) {
2689 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002690 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002691 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002692 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002693 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002694 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002695 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002696 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002697 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002698 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002699 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002700 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002701 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002702 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002703
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002704 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002705 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002706 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002707 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002708 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002709 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002710
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002711 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002712out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002713 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002714 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002715 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002716 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002717 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002718}
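/*
 * Sketch (userspace): the type argument checked at the top of
 * kvm_arch_init_vm() comes straight from KVM_CREATE_VM. Creating a
 * user-controlled VM needs CAP_SYS_ADMIN and CONFIG_KVM_S390_UCONTROL:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 */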
2719
Christian Borntraegerd329c032008-11-26 14:50:27 +01002720void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2721{
Janosch Frank29b40f12019-09-30 04:19:18 -04002722 u16 rc, rrc;
2723
Christian Borntraegerd329c032008-11-26 14:50:27 +01002724 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002725 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002726 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002727 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002728 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002729 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002730
2731 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002732 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002733
Dominik Dingele6db1d62015-05-07 15:41:57 +02002734 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002735 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002736 /* We cannot hold the vcpu mutex here; we are already dying */
2737 if (kvm_s390_pv_cpu_get_handle(vcpu))
2738 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002739 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002740}
2741
2742static void kvm_free_vcpus(struct kvm *kvm)
2743{
2744 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002745 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002746
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002747 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002748 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002749
2750 mutex_lock(&kvm->lock);
2751 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2752 kvm->vcpus[i] = NULL;
2753
2754 atomic_set(&kvm->online_vcpus, 0);
2755 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002756}
2757
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002758void kvm_arch_destroy_vm(struct kvm *kvm)
2759{
Janosch Frank29b40f12019-09-30 04:19:18 -04002760 u16 rc, rrc;
2761
Christian Borntraegerd329c032008-11-26 14:50:27 +01002762 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002763 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002764 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002765 /*
2766 * We are already at the end of life and kvm->lock is not taken.
2767 * This is ok as the file descriptor is closed by now and nobody
2768 * can mess with the pv state. To avoid lockdep_assert_held from
 2769 * complaining, we do not use kvm_s390_pv_is_protected.
2770 */
2771 if (kvm_s390_pv_get_handle(kvm))
2772 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2773 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002774 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002775 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002776 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002777 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002778 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002779 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002780 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002781}
2782
2783/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002784static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2785{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002786 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002787 if (!vcpu->arch.gmap)
2788 return -ENOMEM;
2789 vcpu->arch.gmap->private = vcpu->kvm;
2790
2791 return 0;
2792}
2793
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002794static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2795{
David Hildenbranda6940672016-08-08 22:39:32 +02002796 if (!kvm_s390_use_sca_entries())
2797 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002798 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002799 if (vcpu->kvm->arch.use_esca) {
2800 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002801
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002802 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002803 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002804 } else {
2805 struct bsca_block *sca = vcpu->kvm->arch.sca;
2806
2807 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002808 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002809 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002810 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002811}
2812
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002813static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002814{
David Hildenbranda6940672016-08-08 22:39:32 +02002815 if (!kvm_s390_use_sca_entries()) {
2816 struct bsca_block *sca = vcpu->kvm->arch.sca;
2817
2818 /* we still need the basic sca for the ipte control */
2819 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2820 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002821 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002822 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002823 read_lock(&vcpu->kvm->arch.sca_lock);
2824 if (vcpu->kvm->arch.use_esca) {
2825 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002826
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002827 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002828 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2829 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002830 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002831 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002832 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002833 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002834
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002835 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002836 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2837 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002838 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002839 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002840 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002841}
2842
2843/* Basic SCA to Extended SCA data copy routines */
2844static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2845{
2846 d->sda = s->sda;
2847 d->sigp_ctrl.c = s->sigp_ctrl.c;
2848 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2849}
2850
2851static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2852{
2853 int i;
2854
2855 d->ipte_control = s->ipte_control;
2856 d->mcn[0] = s->mcn;
2857 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2858 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2859}
2860
2861static int sca_switch_to_extended(struct kvm *kvm)
2862{
2863 struct bsca_block *old_sca = kvm->arch.sca;
2864 struct esca_block *new_sca;
2865 struct kvm_vcpu *vcpu;
2866 unsigned int vcpu_idx;
2867 u32 scaol, scaoh;
2868
Janosch Frank29b40f12019-09-30 04:19:18 -04002869 if (kvm->arch.use_esca)
2870 return 0;
2871
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002872 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2873 if (!new_sca)
2874 return -ENOMEM;
2875
2876 scaoh = (u32)((u64)(new_sca) >> 32);
2877 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2878
2879 kvm_s390_vcpu_block_all(kvm);
2880 write_lock(&kvm->arch.sca_lock);
2881
2882 sca_copy_b_to_e(new_sca, old_sca);
2883
2884 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2885 vcpu->arch.sie_block->scaoh = scaoh;
2886 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002887 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002888 }
2889 kvm->arch.sca = new_sca;
2890 kvm->arch.use_esca = 1;
2891
2892 write_unlock(&kvm->arch.sca_lock);
2893 kvm_s390_vcpu_unblock_all(kvm);
2894
2895 free_page((unsigned long)old_sca);
2896
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002897 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2898 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002899 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002900}
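/*
 * For scale: KVM_S390_BSCA_CPU_SLOTS is 64 and KVM_S390_ESCA_CPU_SLOTS is
 * 248, so this one-way switch is what lets a VM grow beyond 64 VCPUs
 * (provided the machine offers ESCA and the 64-bit SCA origin, see
 * sca_can_add_vcpu() below).
 */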
2901
2902static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2903{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002904 int rc;
2905
David Hildenbranda6940672016-08-08 22:39:32 +02002906 if (!kvm_s390_use_sca_entries()) {
2907 if (id < KVM_MAX_VCPUS)
2908 return true;
2909 return false;
2910 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002911 if (id < KVM_S390_BSCA_CPU_SLOTS)
2912 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002913 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002914 return false;
2915
2916 mutex_lock(&kvm->lock);
2917 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2918 mutex_unlock(&kvm->lock);
2919
2920 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002921}
2922
David Hildenbranddb0758b2016-02-15 09:42:25 +01002923/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2924static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2925{
2926 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002927 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002928 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002929 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002930}
2931
 2932/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2933static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2934{
2935 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002936 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002937 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2938 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002939 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002940}
2941
 2942/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2943static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2944{
2945 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2946 vcpu->arch.cputm_enabled = true;
2947 __start_cpu_timer_accounting(vcpu);
2948}
2949
 2950/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
2951static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2952{
2953 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2954 __stop_cpu_timer_accounting(vcpu);
2955 vcpu->arch.cputm_enabled = false;
2956}
2957
2958static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2959{
2960 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2961 __enable_cpu_timer_accounting(vcpu);
2962 preempt_enable();
2963}
2964
2965static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2966{
2967 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2968 __disable_cpu_timer_accounting(vcpu);
2969 preempt_enable();
2970}
2971
David Hildenbrand4287f242016-02-15 09:40:12 +01002972/* set the cpu timer - may only be called from the VCPU thread itself */
2973void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2974{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002975 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002976 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002977 if (vcpu->arch.cputm_enabled)
2978 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002979 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002980 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002981 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002982}
2983
David Hildenbranddb0758b2016-02-15 09:42:25 +01002984/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002985__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2986{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002987 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002988 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002989
2990 if (unlikely(!vcpu->arch.cputm_enabled))
2991 return vcpu->arch.sie_block->cputm;
2992
David Hildenbrand9c23a132016-02-17 21:53:33 +01002993 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2994 do {
2995 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2996 /*
2997 * If the writer would ever execute a read in the critical
2998 * section, e.g. in irq context, we have a deadlock.
2999 */
3000 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3001 value = vcpu->arch.sie_block->cputm;
3002 /* if cputm_start is 0, accounting is being started/stopped */
3003 if (likely(vcpu->arch.cputm_start))
3004 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3005 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3006 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003007 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003008}
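/*
 * The loop above is a variation of the generic seqcount reader pattern; a
 * stand-alone sketch of the plain pattern for comparison (shared_value is
 * hypothetical data). The KVM variant additionally masks out the low bit
 * (seq & ~1) so the reader can make progress while the owning VCPU is
 * mid-update, using cputm_start to disambiguate.
 *
 *	static seqcount_t seq = SEQCNT_ZERO(seq);
 *	static u64 shared_value;
 *
 *	static u64 read_value(void)
 *	{
 *		unsigned int s;
 *		u64 v;
 *
 *		do {
 *			s = read_seqcount_begin(&seq);	// waits for even seq
 *			v = shared_value;
 *		} while (read_seqcount_retry(&seq, s));	// writer raced: retry
 *		return v;
 *	}
 */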
3009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003010void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3011{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003012
David Hildenbrand37d9df92015-03-11 16:47:33 +01003013 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003014 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003015 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003016 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003017 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003018}
3019
3020void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3021{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003022 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003023 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003024 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003025 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003026 vcpu->arch.enabled_gmap = gmap_get_enabled();
3027 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003028
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003029}
3030
Dominik Dingel31928aa2014-12-04 15:47:07 +01003031void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003032{
Jason J. Herne72f25022014-11-25 09:46:02 -05003033 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003034 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003035 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003036 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003037 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003038 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003039 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003040 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003041 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003042 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003043 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3044 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003045 /* make vcpu_load load the right gmap on the first trigger */
3046 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003047}
3048
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003049static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3050{
3051 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3052 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3053 return true;
3054 return false;
3055}
3056
3057static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3058{
3059 /* At least one ECC subfunction must be present */
3060 return kvm_has_pckmo_subfunc(kvm, 32) ||
3061 kvm_has_pckmo_subfunc(kvm, 33) ||
3062 kvm_has_pckmo_subfunc(kvm, 34) ||
3063 kvm_has_pckmo_subfunc(kvm, 40) ||
3064 kvm_has_pckmo_subfunc(kvm, 41);
3065
3066}
3067
Tony Krowiak5102ee82014-06-27 14:46:01 -04003068static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3069{
Tony Krowiake585b242018-09-25 19:16:18 -04003070 /*
3071 * If the AP instructions are not being interpreted and the MSAX3
3072 * facility is not configured for the guest, there is nothing to set up.
3073 */
3074 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003075 return;
3076
Tony Krowiake585b242018-09-25 19:16:18 -04003077 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003078 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003079 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003080 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003081
Tony Krowiake585b242018-09-25 19:16:18 -04003082 if (vcpu->kvm->arch.crypto.apie)
3083 vcpu->arch.sie_block->eca |= ECA_APIE;
3084
3085 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003086 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003087 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003088 /* ECC is also wrapped with the AES key */
3089 if (kvm_has_pckmo_ecc(vcpu->kvm))
3090 vcpu->arch.sie_block->ecd |= ECD_ECC;
3091 }
3092
Tony Krowiaka374e892014-09-03 10:13:53 +02003093 if (vcpu->kvm->arch.crypto.dea_kw)
3094 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003095}
3096
Dominik Dingelb31605c2014-03-25 13:47:11 +01003097void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3098{
3099 free_page(vcpu->arch.sie_block->cbrlo);
3100 vcpu->arch.sie_block->cbrlo = 0;
3101}
3102
3103int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3104{
3105 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
3106 if (!vcpu->arch.sie_block->cbrlo)
3107 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003108 return 0;
3109}
3110
Michael Mueller91520f12015-02-27 14:32:11 +01003111static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3112{
3113 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3114
Michael Mueller91520f12015-02-27 14:32:11 +01003115 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003116 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003117 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003118}
3119
Sean Christophersonff72bb52019-12-18 13:55:20 -08003120static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3121{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003122 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003123 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003124
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003125 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3126 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003127 CPUSTAT_STOPPED);
3128
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003129 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003130 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003131 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003132 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003133
Michael Mueller91520f12015-02-27 14:32:11 +01003134 kvm_s390_vcpu_setup_model(vcpu);
3135
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003136 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3137 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003138 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003139 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003140 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003141 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003142 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003143
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003144 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003145 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003146 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003147 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3148 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003149 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003150 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003151 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003152 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003153 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003154 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003155 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003156 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003157 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003158 vcpu->arch.sie_block->eca |= ECA_VX;
3159 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003160 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003161 if (test_kvm_facility(vcpu->kvm, 139))
3162 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003163 if (test_kvm_facility(vcpu->kvm, 156))
3164 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003165 if (vcpu->arch.sie_block->gd) {
3166 vcpu->arch.sie_block->eca |= ECA_AIV;
3167 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3168 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3169 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003170 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3171 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003172 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003173
3174 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003175 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003176 else
3177 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003178
Dominik Dingele6db1d62015-05-07 15:41:57 +02003179 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003180 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3181 if (rc)
3182 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003183 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003184 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003185 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003186
Collin Walling67d49d52018-08-31 12:51:19 -04003187 vcpu->arch.sie_block->hpid = HPID_KVM;
3188
Tony Krowiak5102ee82014-06-27 14:46:01 -04003189 kvm_s390_vcpu_crypto_setup(vcpu);
3190
Janosch Frank29b40f12019-09-30 04:19:18 -04003191 mutex_lock(&vcpu->kvm->lock);
3192 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3193 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3194 if (rc)
3195 kvm_s390_vcpu_unsetup_cmma(vcpu);
3196 }
3197 mutex_unlock(&vcpu->kvm->lock);
3198
Dominik Dingelb31605c2014-03-25 13:47:11 +01003199 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003200}
3201
Sean Christopherson897cc382019-12-18 13:55:09 -08003202int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3203{
3204 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3205 return -EINVAL;
3206 return 0;
3207}
3208
Sean Christophersone529ef62019-12-18 13:55:15 -08003209int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003210{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003211 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003212 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003213
QingFeng Haoda72ca42017-06-07 11:41:19 +02003214 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003215 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3216 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003217 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003218
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003219 vcpu->arch.sie_block = &sie_page->sie_block;
3220 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3221
David Hildenbrandefed1102015-04-16 12:32:41 +02003222 /* the real guest size will always be smaller than msl */
3223 vcpu->arch.sie_block->mso = 0;
3224 vcpu->arch.sie_block->msl = sclp.hamax;
3225
Sean Christophersone529ef62019-12-18 13:55:15 -08003226 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003227 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003228 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003229 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3230 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003231 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003232
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003233 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3234 kvm_clear_async_pf_completion_queue(vcpu);
3235 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3236 KVM_SYNC_GPRS |
3237 KVM_SYNC_ACRS |
3238 KVM_SYNC_CRS |
3239 KVM_SYNC_ARCH0 |
3240 KVM_SYNC_PFAULT;
3241 kvm_s390_set_prefix(vcpu, 0);
3242 if (test_kvm_facility(vcpu->kvm, 64))
3243 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3244 if (test_kvm_facility(vcpu->kvm, 82))
3245 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3246 if (test_kvm_facility(vcpu->kvm, 133))
3247 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3248 if (test_kvm_facility(vcpu->kvm, 156))
3249 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3250 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3251 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3252 */
3253 if (MACHINE_HAS_VX)
3254 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3255 else
3256 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3257
3258 if (kvm_is_ucontrol(vcpu->kvm)) {
3259 rc = __kvm_ucontrol_vcpu_init(vcpu);
3260 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003261 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003262 }
3263
Sean Christophersone529ef62019-12-18 13:55:15 -08003264 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3265 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3266 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003267
Sean Christophersonff72bb52019-12-18 13:55:20 -08003268 rc = kvm_s390_vcpu_setup(vcpu);
3269 if (rc)
3270 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003271 return 0;
3272
Sean Christophersonff72bb52019-12-18 13:55:20 -08003273out_ucontrol_uninit:
3274 if (kvm_is_ucontrol(vcpu->kvm))
3275 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003276out_free_sie_block:
3277 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003278 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003279}
3280
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003281int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3282{
David Hildenbrand9a022062014-08-05 17:40:47 +02003283 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003284}
3285
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003286bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3287{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003288 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003289}
3290
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003291void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003292{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003293 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003294 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003295}
3296
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003297void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003298{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003299 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003300}
3301
Christian Borntraeger8e236542015-04-09 13:49:04 +02003302static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3303{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003304 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003305 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003306}
3307
David Hildenbrand9ea59722018-09-25 19:16:16 -04003308bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3309{
3310 return atomic_read(&vcpu->arch.sie_block->prog20) &
3311 (PROG_BLOCK_SIE | PROG_REQUEST);
3312}
3313
Christian Borntraeger8e236542015-04-09 13:49:04 +02003314static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3315{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003316 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003317}
3318
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003319/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003320 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003321 * If the CPU is not running (e.g. waiting while idle), the function will
3322 * return immediately. */
3323void exit_sie(struct kvm_vcpu *vcpu)
3324{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003325 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003326 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003327 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3328 cpu_relax();
3329}
3330
Christian Borntraeger8e236542015-04-09 13:49:04 +02003331/* Kick a guest cpu out of SIE to process a request synchronously */
3332void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003333{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003334 kvm_make_request(req, vcpu);
3335 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003336}
3337
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003338static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3339 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003340{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003341 struct kvm *kvm = gmap->private;
3342 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003343 unsigned long prefix;
3344 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003345
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003346 if (gmap_is_shadow(gmap))
3347 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003348 if (start >= 1UL << 31)
3349 /* We are only interested in prefix pages */
3350 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003351 kvm_for_each_vcpu(i, vcpu, kvm) {
3352 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003353 prefix = kvm_s390_get_prefix(vcpu);
3354 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3355 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3356 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003357 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003358 }
3359 }
3360}
3361
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003362bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3363{
 3364 /* do not poll if more than halt_poll_max_steal percent of the time is steal time */
3365 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3366 halt_poll_max_steal) {
3367 vcpu->stat.halt_no_poll_steal++;
3368 return true;
3369 }
3370 return false;
3371}
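/*
 * Worked numbers for the check above: one TOD-clock microsecond is 1 << 12
 * units, so TICK_USEC << 12 is the length of one timer tick in TOD units,
 * and avg_steal_timer * 100 / (TICK_USEC << 12) is the recent steal time
 * as a percentage of a tick. With the default halt_poll_max_steal of 10,
 * halt polling stops once more than about 10% of CPU time is stolen by
 * the underlying hypervisor.
 */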
3372
Christoffer Dallb6d33832012-03-08 16:44:24 -05003373int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3374{
3375 /* kvm common code refers to this, but never calls it */
3376 BUG();
3377 return 0;
3378}
3379
Carsten Otte14eebd92012-05-15 14:15:26 +02003380static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3381 struct kvm_one_reg *reg)
3382{
3383 int r = -EINVAL;
3384
3385 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003386 case KVM_REG_S390_TODPR:
3387 r = put_user(vcpu->arch.sie_block->todpr,
3388 (u32 __user *)reg->addr);
3389 break;
3390 case KVM_REG_S390_EPOCHDIFF:
3391 r = put_user(vcpu->arch.sie_block->epoch,
3392 (u64 __user *)reg->addr);
3393 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003394 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003395 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003396 (u64 __user *)reg->addr);
3397 break;
3398 case KVM_REG_S390_CLOCK_COMP:
3399 r = put_user(vcpu->arch.sie_block->ckc,
3400 (u64 __user *)reg->addr);
3401 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003402 case KVM_REG_S390_PFTOKEN:
3403 r = put_user(vcpu->arch.pfault_token,
3404 (u64 __user *)reg->addr);
3405 break;
3406 case KVM_REG_S390_PFCOMPARE:
3407 r = put_user(vcpu->arch.pfault_compare,
3408 (u64 __user *)reg->addr);
3409 break;
3410 case KVM_REG_S390_PFSELECT:
3411 r = put_user(vcpu->arch.pfault_select,
3412 (u64 __user *)reg->addr);
3413 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003414 case KVM_REG_S390_PP:
3415 r = put_user(vcpu->arch.sie_block->pp,
3416 (u64 __user *)reg->addr);
3417 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003418 case KVM_REG_S390_GBEA:
3419 r = put_user(vcpu->arch.sie_block->gbea,
3420 (u64 __user *)reg->addr);
3421 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003422 default:
3423 break;
3424 }
3425
3426 return r;
3427}
3428
3429static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3430 struct kvm_one_reg *reg)
3431{
3432 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003433 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003434
3435 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003436 case KVM_REG_S390_TODPR:
3437 r = get_user(vcpu->arch.sie_block->todpr,
3438 (u32 __user *)reg->addr);
3439 break;
3440 case KVM_REG_S390_EPOCHDIFF:
3441 r = get_user(vcpu->arch.sie_block->epoch,
3442 (u64 __user *)reg->addr);
3443 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003444 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003445 r = get_user(val, (u64 __user *)reg->addr);
3446 if (!r)
3447 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003448 break;
3449 case KVM_REG_S390_CLOCK_COMP:
3450 r = get_user(vcpu->arch.sie_block->ckc,
3451 (u64 __user *)reg->addr);
3452 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003453 case KVM_REG_S390_PFTOKEN:
3454 r = get_user(vcpu->arch.pfault_token,
3455 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003456 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3457 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003458 break;
3459 case KVM_REG_S390_PFCOMPARE:
3460 r = get_user(vcpu->arch.pfault_compare,
3461 (u64 __user *)reg->addr);
3462 break;
3463 case KVM_REG_S390_PFSELECT:
3464 r = get_user(vcpu->arch.pfault_select,
3465 (u64 __user *)reg->addr);
3466 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003467 case KVM_REG_S390_PP:
3468 r = get_user(vcpu->arch.sie_block->pp,
3469 (u64 __user *)reg->addr);
3470 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003471 case KVM_REG_S390_GBEA:
3472 r = get_user(vcpu->arch.sie_block->gbea,
3473 (u64 __user *)reg->addr);
3474 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003475 default:
3476 break;
3477 }
3478
3479 return r;
3480}
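/*
 * Sketch (userspace): the registers handled above are reached through the
 * generic ONE_REG interface; get_cpu_timer() is a hypothetical helper.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int get_cpu_timer(int vcpu_fd, uint64_t *val)
 *	{
 *		struct kvm_one_reg reg = {
 *			.id = KVM_REG_S390_CPU_TIMER,
 *			.addr = (uint64_t)(uintptr_t)val,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	}
 */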
Christoffer Dallb6d33832012-03-08 16:44:24 -05003481
Janosch Frank7de3f142020-01-31 05:02:02 -05003482static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003483{
Janosch Frank7de3f142020-01-31 05:02:02 -05003484 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3485 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3486 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3487
3488 kvm_clear_async_pf_completion_queue(vcpu);
3489 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3490 kvm_s390_vcpu_stop(vcpu);
3491 kvm_s390_clear_local_irqs(vcpu);
3492}
3493
3494static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3495{
3496 /* Initial reset is a superset of the normal reset */
3497 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3498
3499 /* this equals initial cpu reset in pop, but we don't switch to ESA */
3500 vcpu->arch.sie_block->gpsw.mask = 0;
3501 vcpu->arch.sie_block->gpsw.addr = 0;
3502 kvm_s390_set_prefix(vcpu, 0);
3503 kvm_s390_set_cpu_timer(vcpu, 0);
3504 vcpu->arch.sie_block->ckc = 0;
3505 vcpu->arch.sie_block->todpr = 0;
3506 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3507 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3508 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3509 vcpu->run->s.regs.fpc = 0;
3510 vcpu->arch.sie_block->gbea = 1;
3511 vcpu->arch.sie_block->pp = 0;
3512 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3513}
3514
3515static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3516{
3517 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3518
3519 /* Clear reset is a superset of the initial reset */
3520 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3521
3522 memset(&regs->gprs, 0, sizeof(regs->gprs));
3523 memset(&regs->vrs, 0, sizeof(regs->vrs));
3524 memset(&regs->acrs, 0, sizeof(regs->acrs));
3525 memset(&regs->gscb, 0, sizeof(regs->gscb));
3526
3527 regs->etoken = 0;
3528 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003529}
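/*
 * Sketch (userspace): the three reset flavours above map onto argument-less
 * vcpu ioctls. KVM_S390_NORMAL_RESET and KVM_S390_CLEAR_RESET depend on the
 * KVM_CAP_S390_VCPU_RESETS capability; KVM_S390_INITIAL_RESET predates it.
 *
 *	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, NULL);
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, NULL);
 *	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, NULL);
 */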
3530
3531int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3532{
Christoffer Dall875656f2017-12-04 21:35:27 +01003533 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003534 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003535 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003536 return 0;
3537}
3538
3539int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3540{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003541 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003542 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003543 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003544 return 0;
3545}
3546
3547int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3548 struct kvm_sregs *sregs)
3549{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003550 vcpu_load(vcpu);
3551
Christian Borntraeger59674c12012-01-11 11:20:33 +01003552 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003553 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003554
3555 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003556 return 0;
3557}
3558
3559int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3560 struct kvm_sregs *sregs)
3561{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003562 vcpu_load(vcpu);
3563
Christian Borntraeger59674c12012-01-11 11:20:33 +01003564 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003565 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003566
3567 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003568 return 0;
3569}
3570
3571int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3572{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003573 int ret = 0;
3574
3575 vcpu_load(vcpu);
3576
3577 if (test_fp_ctl(fpu->fpc)) {
3578 ret = -EINVAL;
3579 goto out;
3580 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003581 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003582 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003583 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3584 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003585 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003586 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003587
3588out:
3589 vcpu_put(vcpu);
3590 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003591}
3592
3593int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3594{
Christoffer Dall13931232017-12-04 21:35:34 +01003595 vcpu_load(vcpu);
3596
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003597 /* make sure we have the latest values */
3598 save_fpu_regs();
3599 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003600 convert_vx_to_fp((freg_t *) fpu->fprs,
3601 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003602 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003603 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003604 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003605
3606 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003607 return 0;
3608}
3609
3610static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3611{
3612 int rc = 0;
3613
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003614 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003615 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003616 else {
3617 vcpu->run->psw_mask = psw.mask;
3618 vcpu->run->psw_addr = psw.addr;
3619 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003620 return rc;
3621}
3622
3623int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3624 struct kvm_translation *tr)
3625{
3626 return -EINVAL; /* not implemented yet */
3627}
3628
David Hildenbrand27291e22014-01-23 12:26:52 +01003629#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3630 KVM_GUESTDBG_USE_HW_BP | \
3631 KVM_GUESTDBG_ENABLE)
3632
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003633int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3634 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003635{
David Hildenbrand27291e22014-01-23 12:26:52 +01003636 int rc = 0;
3637
Christoffer Dall66b56562017-12-04 21:35:33 +01003638 vcpu_load(vcpu);
3639
David Hildenbrand27291e22014-01-23 12:26:52 +01003640 vcpu->guest_debug = 0;
3641 kvm_s390_clear_bp_data(vcpu);
3642
Christoffer Dall66b56562017-12-04 21:35:33 +01003643 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3644 rc = -EINVAL;
3645 goto out;
3646 }
3647 if (!sclp.has_gpere) {
3648 rc = -EINVAL;
3649 goto out;
3650 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003651
3652 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3653 vcpu->guest_debug = dbg->control;
3654 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003655 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003656
3657 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3658 rc = kvm_s390_import_bp_data(vcpu, dbg);
3659 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003660 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003661 vcpu->arch.guestdbg.last_bp = 0;
3662 }
3663
3664 if (rc) {
3665 vcpu->guest_debug = 0;
3666 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003667 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003668 }
3669
Christoffer Dall66b56562017-12-04 21:35:33 +01003670out:
3671 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003672 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003673}
3674
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003675int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3676 struct kvm_mp_state *mp_state)
3677{
Christoffer Dallfd232562017-12-04 21:35:30 +01003678 int ret;
3679
3680 vcpu_load(vcpu);
3681
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003682 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003683 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3684 KVM_MP_STATE_OPERATING;
3685
3686 vcpu_put(vcpu);
3687 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003688}
3689
3690int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3691 struct kvm_mp_state *mp_state)
3692{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003693 int rc = 0;
3694
Christoffer Dalle83dff52017-12-04 21:35:31 +01003695 vcpu_load(vcpu);
3696
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003697 /* user space knows about this interface - let it control the state */
3698 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3699
3700 switch (mp_state->mp_state) {
3701 case KVM_MP_STATE_STOPPED:
3702 kvm_s390_vcpu_stop(vcpu);
3703 break;
3704 case KVM_MP_STATE_OPERATING:
3705 kvm_s390_vcpu_start(vcpu);
3706 break;
3707 case KVM_MP_STATE_LOAD:
3708 case KVM_MP_STATE_CHECK_STOP:
3709 /* fall through - CHECK_STOP and LOAD are not supported yet */
3710 default:
3711 rc = -ENXIO;
3712 }
3713
Christoffer Dalle83dff52017-12-04 21:35:31 +01003714 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003715 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003716}
3717
David Hildenbrand8ad35752014-03-14 11:00:21 +01003718static bool ibs_enabled(struct kvm_vcpu *vcpu)
3719{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003720 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003721}
3722
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003723static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3724{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003725retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003726 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003727 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003728 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003729 /*
3730 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003731 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003732 * This ensures that the ipte instruction for this request has
3733 * already finished. We might race against a second unmapper that
3734	 * wants to set the blocking bit. Let's just retry the request loop.
3735 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003736 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003737 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003738 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3739 kvm_s390_get_prefix(vcpu),
3740 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003741 if (rc) {
3742 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003743 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003744 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003745 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003746 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003747
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003748 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3749 vcpu->arch.sie_block->ihcpu = 0xffff;
3750 goto retry;
3751 }
3752
David Hildenbrand8ad35752014-03-14 11:00:21 +01003753 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3754 if (!ibs_enabled(vcpu)) {
3755 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003756 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003757 }
3758 goto retry;
3759 }
3760
3761 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3762 if (ibs_enabled(vcpu)) {
3763 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003764 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003765 }
3766 goto retry;
3767 }
3768
David Hildenbrand6502a342016-06-21 14:19:51 +02003769 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3770 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3771 goto retry;
3772 }
3773
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003774 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3775 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003776 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003777 * instruction manually, in order to provide additional
3778	 * functionality needed for live migration.
3779 */
3780 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3781 goto retry;
3782 }
3783
3784 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3785 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003786 * Re-enable CMM virtualization if CMMA is available and
3787 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003788 */
3789 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003790 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003791 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3792 goto retry;
3793 }
3794
David Hildenbrand0759d062014-05-13 16:54:32 +02003795 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003796 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003797 /* we left the vsie handler, nothing to do, just clear the request */
3798 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003799
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003800 return 0;
3801}
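/*
 * Editor's sketch of the producer side of the request loop above,
 * using only helpers that already appear in this file (treat it as an
 * illustration, not new logic): a remote context queues a request bit,
 * and may additionally force the VCPU out of SIE so the request is
 * handled before the guest runs again.
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);	  // picked up on
 *							  // next entry
 *	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu); // also forces
 *							  // an exit
 */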
3802
David Hildenbrand0e7def52018-02-07 12:46:43 +01003803void kvm_s390_set_tod_clock(struct kvm *kvm,
3804 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003805{
3806 struct kvm_vcpu *vcpu;
3807 struct kvm_s390_tod_clock_ext htod;
3808 int i;
3809
3810 mutex_lock(&kvm->lock);
3811 preempt_disable();
3812
3813 get_tod_clock_ext((char *)&htod);
3814
3815 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003816 kvm->arch.epdx = 0;
3817 if (test_kvm_facility(kvm, 139)) {
3818 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3819 if (kvm->arch.epoch > gtod->tod)
3820 kvm->arch.epdx -= 1;
3821 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003822
3823 kvm_s390_vcpu_block_all(kvm);
3824 kvm_for_each_vcpu(i, vcpu, kvm) {
3825 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3826 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3827 }
3828
3829 kvm_s390_vcpu_unblock_all(kvm);
3830 preempt_enable();
3831 mutex_unlock(&kvm->lock);
3832}
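/*
 * A minimal sketch of the multi-precision arithmetic above (names are
 * illustrative): the guest epoch is the 128-bit difference gtod minus
 * host TOD, kept as an (epoch index, epoch) pair of 64-bit halves.
 * The borrow out of the low-half subtraction is detected by the
 * wrap-around test "difference > minuend":
 *
 *	u64 lo = gtod_tod - host_tod;	// may wrap below zero
 *	u64 hi = gtod_idx - host_idx;
 *	if (lo > gtod_tod)		// wrapped -> take a borrow
 *		hi -= 1;
 */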
3833
Thomas Huthfa576c52014-05-06 17:20:16 +02003834/**
3835 * kvm_arch_fault_in_page - fault-in guest page if necessary
3836 * @vcpu: The corresponding virtual cpu
3837 * @gpa: Guest physical address
3838 * @writable: Whether the page should be writable or not
3839 *
3840 * Make sure that a guest page has been faulted-in on the host.
3841 *
3842 * Return: Zero on success, negative error code otherwise.
3843 */
3844long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003845{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003846 return gmap_fault(vcpu->arch.gmap, gpa,
3847 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003848}
3849
Dominik Dingel3c038e62013-10-07 17:11:48 +02003850static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3851 unsigned long token)
3852{
3853 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003854 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003855
3856 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003857 irq.u.ext.ext_params2 = token;
3858 irq.type = KVM_S390_INT_PFAULT_INIT;
3859 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003860 } else {
3861 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003862 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003863 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3864 }
3865}
3866
3867void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3868 struct kvm_async_pf *work)
3869{
3870 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3871 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3872}
3873
3874void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3875 struct kvm_async_pf *work)
3876{
3877 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3878 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3879}
3880
3881void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3882 struct kvm_async_pf *work)
3883{
3884 /* s390 will always inject the page directly */
3885}
3886
3887bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3888{
3889 /*
3890 * s390 will always inject the page directly,
3891	 * but we still want kvm_check_async_pf_completion() to clean up
3892 */
3893 return true;
3894}
3895
3896static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3897{
3898 hva_t hva;
3899 struct kvm_arch_async_pf arch;
3900 int rc;
3901
3902 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3903 return 0;
3904 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3905 vcpu->arch.pfault_compare)
3906 return 0;
3907 if (psw_extint_disabled(vcpu))
3908 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003909 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003910 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003911 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003912 return 0;
3913 if (!vcpu->arch.gmap->pfault_enabled)
3914 return 0;
3915
Heiko Carstens81480cc2014-01-01 16:36:07 +01003916 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3917 hva += current->thread.gmap_addr & ~PAGE_MASK;
3918 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003919 return 0;
3920
3921 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3922 return rc;
3923}
3924
Thomas Huth3fb4c402013-09-12 10:33:43 +02003925static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003926{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003927 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003928
Dominik Dingel3c038e62013-10-07 17:11:48 +02003929 /*
3930 * On s390 notifications for arriving pages will be delivered directly
3931 * to the guest but the house keeping for completed pfaults is
3932 * handled outside the worker.
3933 */
3934 kvm_check_async_pf_completion(vcpu);
3935
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003936 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3937 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003938
3939 if (need_resched())
3940 schedule();
3941
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003942 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003943 s390_handle_mcck();
3944
Jens Freimann79395032014-04-17 10:10:30 +02003945 if (!kvm_is_ucontrol(vcpu->kvm)) {
3946 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3947 if (rc)
3948 return rc;
3949 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003950
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003951 rc = kvm_s390_handle_requests(vcpu);
3952 if (rc)
3953 return rc;
3954
David Hildenbrand27291e22014-01-23 12:26:52 +01003955 if (guestdbg_enabled(vcpu)) {
3956 kvm_s390_backup_guest_per_regs(vcpu);
3957 kvm_s390_patch_guest_per_regs(vcpu);
3958 }
3959
Michael Mueller9f30f622019-01-31 09:52:44 +01003960 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3961
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003962 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003963 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3964 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3965 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003966
Thomas Huth3fb4c402013-09-12 10:33:43 +02003967 return 0;
3968}
3969
Thomas Huth492d8642015-02-10 16:11:01 +01003970static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3971{
David Hildenbrand56317922016-01-12 17:37:58 +01003972 struct kvm_s390_pgm_info pgm_info = {
3973 .code = PGM_ADDRESSING,
3974 };
3975 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003976 int rc;
3977
3978 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3979 trace_kvm_s390_sie_fault(vcpu);
3980
3981 /*
3982 * We want to inject an addressing exception, which is defined as a
3983 * suppressing or terminating exception. However, since we came here
3984 * by a DAT access exception, the PSW still points to the faulting
3985 * instruction since DAT exceptions are nullifying. So we've got
3986 * to look up the current opcode to get the length of the instruction
3987 * to be able to forward the PSW.
3988 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003989 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003990 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003991 if (rc < 0) {
3992 return rc;
3993 } else if (rc) {
3994 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3995 * Forward by arbitrary ilc, injection will take care of
3996 * nullification if necessary.
3997 */
3998 pgm_info = vcpu->arch.pgm;
3999 ilen = 4;
4000 }
David Hildenbrand56317922016-01-12 17:37:58 +01004001 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4002 kvm_s390_forward_psw(vcpu, ilen);
4003 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004004}
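/*
 * For reference, the instruction-length decode that insn_length() and
 * kvm_s390_forward_psw() rely on above: bits 0-1 of the first opcode
 * byte encode the length of an s390 instruction. An equivalent sketch
 * (function name is illustrative only):
 *
 *	static inline int ilen_from_opcode(u8 opcode)
 *	{
 *		switch (opcode >> 6) {
 *		case 0:				// 00 -> 2 bytes
 *			return 2;
 *		case 3:				// 11 -> 6 bytes
 *			return 6;
 *		default:			// 01/10 -> 4 bytes
 *			return 4;
 *		}
 *	}
 */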
4005
Thomas Huth3fb4c402013-09-12 10:33:43 +02004006static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4007{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004008 struct mcck_volatile_info *mcck_info;
4009 struct sie_page *sie_page;
4010
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004011 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4012 vcpu->arch.sie_block->icptcode);
4013 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4014
David Hildenbrand27291e22014-01-23 12:26:52 +01004015 if (guestdbg_enabled(vcpu))
4016 kvm_s390_restore_guest_per_regs(vcpu);
4017
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004018 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4019 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004020
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004021 if (exit_reason == -EINTR) {
4022 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4023 sie_page = container_of(vcpu->arch.sie_block,
4024 struct sie_page, sie_block);
4025 mcck_info = &sie_page->mcck_info;
4026 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4027 return 0;
4028 }
4029
David Hildenbrand71f116b2015-10-19 16:24:28 +02004030 if (vcpu->arch.sie_block->icptcode > 0) {
4031 int rc = kvm_handle_sie_intercept(vcpu);
4032
4033 if (rc != -EOPNOTSUPP)
4034 return rc;
4035 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4036 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4037 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4038 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4039 return -EREMOTE;
4040 } else if (exit_reason != -EFAULT) {
4041 vcpu->stat.exit_null++;
4042 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004043 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4044 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4045 vcpu->run->s390_ucontrol.trans_exc_code =
4046 current->thread.gmap_addr;
4047 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004048 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004049 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004050 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004051 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004052 if (kvm_arch_setup_async_pf(vcpu))
4053 return 0;
4054 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004055 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004056 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004057}
4058
4059static int __vcpu_run(struct kvm_vcpu *vcpu)
4060{
4061 int rc, exit_reason;
4062
Thomas Huth800c1062013-09-12 10:33:45 +02004063 /*
4064	 * We try to hold kvm->srcu during most of vcpu_run (except while
4065	 * running the guest), so that memslots (and other stuff) are protected
4066 */
4067 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4068
Thomas Hutha76ccff2013-09-12 10:33:44 +02004069 do {
4070 rc = vcpu_pre_run(vcpu);
4071 if (rc)
4072 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004073
Thomas Huth800c1062013-09-12 10:33:45 +02004074 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004075 /*
4076	 * As PF_VCPU will be used in the fault handler, there must be
4077	 * no uaccess between guest_enter and guest_exit.
4078 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004079 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004080 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004081 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004082 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004083 exit_reason = sie64a(vcpu->arch.sie_block,
4084 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004085 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004086 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004087 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004088 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004089 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004090
Thomas Hutha76ccff2013-09-12 10:33:44 +02004091 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004092 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004093
Thomas Huth800c1062013-09-12 10:33:45 +02004094 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004095 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004096}
4097
David Hildenbrandb028ee32014-07-17 10:47:43 +02004098static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4099{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004100 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004101 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004102
4103 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004104 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004105 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4106 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4107 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4108 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4109 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4110 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004111 /* some control register changes require a tlb flush */
4112 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004113 }
4114 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01004115 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004116 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4117 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4118 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4119 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4120 }
4121 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4122 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4123 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4124 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004125 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4126 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004127 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004128 /*
4129 * If userspace sets the riccb (e.g. after migration) to a valid state,
4130 * we should enable RI here instead of doing the lazy enablement.
4131 */
4132 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004133 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004134 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004135 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004136 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004137 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004138 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004139 /*
4140 * If userspace sets the gscb (e.g. after migration) to non-zero,
4141 * we should enable GS here instead of doing the lazy enablement.
4142 */
4143 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4144 test_kvm_facility(vcpu->kvm, 133) &&
4145 gscb->gssm &&
4146 !vcpu->arch.gs_enabled) {
4147 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4148 vcpu->arch.sie_block->ecb |= ECB_GS;
4149 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4150 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004151 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004152 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4153 test_kvm_facility(vcpu->kvm, 82)) {
4154 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4155 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4156 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004157 save_access_regs(vcpu->arch.host_acrs);
4158 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01004159 /* save host (userspace) fprs/vrs */
4160 save_fpu_regs();
4161 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4162 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4163 if (MACHINE_HAS_VX)
4164 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4165 else
4166 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4167 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4168 if (test_fp_ctl(current->thread.fpu.fpc))
4169 /* User space provided an invalid FPC, let's clear it */
4170 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004171 if (MACHINE_HAS_GS) {
4172 preempt_disable();
4173 __ctl_set_bit(2, 4);
4174 if (current->thread.gs_cb) {
4175 vcpu->arch.host_gscb = current->thread.gs_cb;
4176 save_gs_cb(vcpu->arch.host_gscb);
4177 }
4178 if (vcpu->arch.gs_enabled) {
4179 current->thread.gs_cb = (struct gs_cb *)
4180 &vcpu->run->s.regs.gscb;
4181 restore_gs_cb(current->thread.gs_cb);
4182 }
4183 preempt_enable();
4184 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004185 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02004186
David Hildenbrandb028ee32014-07-17 10:47:43 +02004187 kvm_run->kvm_dirty_regs = 0;
4188}
4189
4190static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4191{
4192 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4193 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4194 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4195 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01004196 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004197 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4198 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4199 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4200 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4201 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4202 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4203 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004204 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004205 save_access_regs(vcpu->run->s.regs.acrs);
4206 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01004207 /* Save guest register state */
4208 save_fpu_regs();
4209 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4210 /* Restore will be done lazily at return */
4211 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4212 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004213 if (MACHINE_HAS_GS) {
4214 __ctl_set_bit(2, 4);
4215 if (vcpu->arch.gs_enabled)
4216 save_gs_cb(current->thread.gs_cb);
4217 preempt_disable();
4218 current->thread.gs_cb = vcpu->arch.host_gscb;
4219 restore_gs_cb(vcpu->arch.host_gscb);
4220 preempt_enable();
4221 if (!vcpu->arch.host_gscb)
4222 __ctl_clear_bit(2, 4);
4223 vcpu->arch.host_gscb = NULL;
4224 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004225 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004226}
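/*
 * Hedged userspace view of the sync_regs()/store_regs() pair above:
 * before KVM_RUN, userspace flags the parts of kvm_run->s.regs it has
 * modified in kvm_run->kvm_dirty_regs; on return, the same area holds
 * the current guest state. A minimal sketch (vcpu_fd and run are
 * assumed to be set up by the caller):
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	handle_exit(run->exit_reason, run->psw_mask, run->psw_addr);
 */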
4227
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004228int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4229{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004230 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004231
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004232 if (kvm_run->immediate_exit)
4233 return -EINTR;
4234
Thomas Huth200824f2019-09-04 10:51:59 +02004235 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4236 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4237 return -EINVAL;
4238
Christoffer Dallaccb7572017-12-04 21:35:25 +01004239 vcpu_load(vcpu);
4240
David Hildenbrand27291e22014-01-23 12:26:52 +01004241 if (guestdbg_exit_pending(vcpu)) {
4242 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004243 rc = 0;
4244 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004245 }
4246
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004247 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004248
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004249 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4250 kvm_s390_vcpu_start(vcpu);
4251 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004252 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004253 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004254 rc = -EINVAL;
4255 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004256 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004257
David Hildenbrandb028ee32014-07-17 10:47:43 +02004258 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004259 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004260
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004261 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004262 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004263
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004264 if (signal_pending(current) && !rc) {
4265 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004266 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004267 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004268
David Hildenbrand27291e22014-01-23 12:26:52 +01004269 if (guestdbg_exit_pending(vcpu) && !rc) {
4270 kvm_s390_prepare_debug_exit(vcpu);
4271 rc = 0;
4272 }
4273
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004274 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004275 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004276 rc = 0;
4277 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004278
David Hildenbranddb0758b2016-02-15 09:42:25 +01004279 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004280 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004281
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004282 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004283
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004284 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004285out:
4286 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004287 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004288}
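/*
 * Note on the immediate_exit check at the top of this ioctl: userspace
 * can set run->immediate_exit (typically from a signal handler) to make
 * the next KVM_RUN return -1 with errno == EINTR without entering the
 * guest. A sketch, under those assumptions:
 *
 *	run->immediate_exit = 1;	// e.g. from a signal handler
 *	r = ioctl(vcpu_fd, KVM_RUN, 0);	// returns -1, errno == EINTR
 */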
4289
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004290/*
4291 * store status at address
4292 * we have two special cases:
4293 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4294 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4295 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004296int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004297{
Carsten Otte092670c2011-07-24 10:48:22 +02004298 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004299 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004300 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004301 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004302 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004303
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004304 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004305 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4306 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004307 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004308 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004309 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4310 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004311 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004312 gpa = px;
4313 } else
4314 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004315
4316 /* manually convert vector registers if necessary */
4317 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004318 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004319 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4320 fprs, 128);
4321 } else {
4322 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004323 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004324 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004325 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004326 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004327 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004328 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004329 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004330 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004331 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004332 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004333 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004334 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004335 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004336 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004337 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004338 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004339 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004340 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004341 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004342 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004343 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004344 &vcpu->arch.sie_block->gcr, 128);
4345 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004346}
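/*
 * Layout written by the function above, relative to the save area base
 * (0x1200 in the NOADDR case). The sizes come straight from the
 * write_guest_abs() calls; the offsets are listed from the lowcore
 * definitions for reference and should be treated as illustrative:
 *
 *	+0x000	fprs (or converted vrs)		128 bytes
 *	+0x080	gprs				128 bytes
 *	+0x100	psw				 16 bytes
 *	+0x118	prefix				  4 bytes
 *	+0x11c	fpc				  4 bytes
 *	+0x124	todpr				  4 bytes
 *	+0x128	cpu timer			  8 bytes
 *	+0x130	clock comparator (ckc >> 8)	  8 bytes
 *	+0x140	access regs			 64 bytes
 *	+0x180	control regs			128 bytes
 */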
4347
Thomas Huthe8798922013-11-06 15:46:33 +01004348int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4349{
4350 /*
4351 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004352 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004353	 * them into the save area.
4354 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004355 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004356 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004357 save_access_regs(vcpu->run->s.regs.acrs);
4358
4359 return kvm_s390_store_status_unloaded(vcpu, addr);
4360}
4361
David Hildenbrand8ad35752014-03-14 11:00:21 +01004362static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4363{
4364 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004365 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004366}
4367
4368static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4369{
4370 unsigned int i;
4371 struct kvm_vcpu *vcpu;
4372
4373 kvm_for_each_vcpu(i, vcpu, kvm) {
4374 __disable_ibs_on_vcpu(vcpu);
4375 }
4376}
4377
4378static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4379{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004380 if (!sclp.has_ibs)
4381 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004382 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004383 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004384}
4385
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004386void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4387{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004388 int i, online_vcpus, started_vcpus = 0;
4389
4390 if (!is_vcpu_stopped(vcpu))
4391 return;
4392
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004393 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004394 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004395 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004396 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4397
4398 for (i = 0; i < online_vcpus; i++) {
4399 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4400 started_vcpus++;
4401 }
4402
4403 if (started_vcpus == 0) {
4404 /* we're the only active VCPU -> speed it up */
4405 __enable_ibs_on_vcpu(vcpu);
4406 } else if (started_vcpus == 1) {
4407 /*
4408 * As we are starting a second VCPU, we have to disable
4409 * the IBS facility on all VCPUs to remove potentially
4410 * oustanding ENABLE requests.
4411 */
4412 __disable_ibs_on_all_vcpus(vcpu->kvm);
4413 }
4414
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004415 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004416 /*
4417 * Another VCPU might have used IBS while we were offline.
4418	 * Let's play it safe and flush the VCPU at startup.
4419 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004420 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004421 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004422 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004423}
4424
4425void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4426{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004427 int i, online_vcpus, started_vcpus = 0;
4428 struct kvm_vcpu *started_vcpu = NULL;
4429
4430 if (is_vcpu_stopped(vcpu))
4431 return;
4432
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004433 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004434 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004435 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004436 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4437
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004438 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004439 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004440
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004441 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004442 __disable_ibs_on_vcpu(vcpu);
4443
4444 for (i = 0; i < online_vcpus; i++) {
4445 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4446 started_vcpus++;
4447 started_vcpu = vcpu->kvm->vcpus[i];
4448 }
4449 }
4450
4451 if (started_vcpus == 1) {
4452 /*
4453 * As we only have one VCPU left, we want to enable the
4454 * IBS facility for that VCPU to speed it up.
4455 */
4456 __enable_ibs_on_vcpu(started_vcpu);
4457 }
4458
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004459 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004460 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004461}
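/*
 * Summary of the IBS handling in the start/stop pair above (no new
 * logic, just the resulting states):
 *
 *	running VCPUs	IBS
 *	1		enabled on the lone runner (fast path)
 *	> 1		disabled on all VCPUs
 */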
4462
Cornelia Huckd6712df2012-12-20 15:32:11 +01004463static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4464 struct kvm_enable_cap *cap)
4465{
4466 int r;
4467
4468 if (cap->flags)
4469 return -EINVAL;
4470
4471 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004472 case KVM_CAP_S390_CSS_SUPPORT:
4473 if (!vcpu->kvm->arch.css_support) {
4474 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004475 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004476 trace_kvm_s390_enable_css(vcpu->kvm);
4477 }
4478 r = 0;
4479 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004480 default:
4481 r = -EINVAL;
4482 break;
4483 }
4484 return r;
4485}
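/*
 * Hedged userspace sketch for the per-VCPU capability above (vcpu_fd is
 * assumed to be an open VCPU file descriptor):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */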
4486
Thomas Huth41408c282015-02-06 15:01:21 +01004487static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4488 struct kvm_s390_mem_op *mop)
4489{
4490 void __user *uaddr = (void __user *)mop->buf;
4491 void *tmpbuf = NULL;
4492 int r, srcu_idx;
4493 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4494 | KVM_S390_MEMOP_F_CHECK_ONLY;
4495
Thomas Hutha13b03b2019-08-29 14:25:17 +02004496 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004497 return -EINVAL;
4498
4499 if (mop->size > MEM_OP_MAX_SIZE)
4500 return -E2BIG;
4501
4502 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4503 tmpbuf = vmalloc(mop->size);
4504 if (!tmpbuf)
4505 return -ENOMEM;
4506 }
4507
4508 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4509
4510 switch (mop->op) {
4511 case KVM_S390_MEMOP_LOGICAL_READ:
4512 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004513 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4514 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004515 break;
4516 }
4517 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4518 if (r == 0) {
4519 if (copy_to_user(uaddr, tmpbuf, mop->size))
4520 r = -EFAULT;
4521 }
4522 break;
4523 case KVM_S390_MEMOP_LOGICAL_WRITE:
4524 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004525 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4526 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004527 break;
4528 }
4529 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4530 r = -EFAULT;
4531 break;
4532 }
4533 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4534 break;
4535 default:
4536 r = -EINVAL;
4537 }
4538
4539 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4540
4541 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4542 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4543
4544 vfree(tmpbuf);
4545 return r;
4546}
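/*
 * Hedged sketch of a userspace logical read through the memop handler
 * above; field names follow the uapi struct kvm_s390_mem_op, and buf
 * is assumed to be a local buffer of at least len bytes:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr	= guest_addr,
 *		.size	= len,
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buf,
 *		.ar	= 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		err(1, "KVM_S390_MEM_OP"); // EFAULT/EINVAL/E2BIG as above
 */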
4547
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004548long kvm_arch_vcpu_async_ioctl(struct file *filp,
4549 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004550{
4551 struct kvm_vcpu *vcpu = filp->private_data;
4552 void __user *argp = (void __user *)arg;
4553
Avi Kivity93736622010-05-13 12:35:17 +03004554 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004555 case KVM_S390_IRQ: {
4556 struct kvm_s390_irq s390irq;
4557
Jens Freimann47b43c52014-11-11 20:57:06 +01004558 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004559 return -EFAULT;
4560 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004561 }
Avi Kivity93736622010-05-13 12:35:17 +03004562 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004563 struct kvm_s390_interrupt s390int;
Thomas Huth53936b52019-09-12 13:54:38 +02004564 struct kvm_s390_irq s390irq = {};
Carsten Otteba5c1e92008-03-25 18:47:26 +01004565
4566 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004567 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004568 if (s390int_to_s390irq(&s390int, &s390irq))
4569 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004570 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004571 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004572 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004573 return -ENOIOCTLCMD;
4574}
4575
4576long kvm_arch_vcpu_ioctl(struct file *filp,
4577 unsigned int ioctl, unsigned long arg)
4578{
4579 struct kvm_vcpu *vcpu = filp->private_data;
4580 void __user *argp = (void __user *)arg;
4581 int idx;
4582 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004583
4584 vcpu_load(vcpu);
4585
4586 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004587 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004588 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004589 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004590 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004591 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004592 case KVM_S390_SET_INITIAL_PSW: {
4593 psw_t psw;
4594
Avi Kivitybc923cc2010-05-13 12:21:46 +03004595 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004596 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004597 break;
4598 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4599 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004600 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004601 case KVM_S390_CLEAR_RESET:
4602 r = 0;
4603 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
4604 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004605 case KVM_S390_INITIAL_RESET:
Janosch Frank7de3f142020-01-31 05:02:02 -05004606 r = 0;
4607 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4608 break;
4609 case KVM_S390_NORMAL_RESET:
4610 r = 0;
4611 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004612 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004613 case KVM_SET_ONE_REG:
4614 case KVM_GET_ONE_REG: {
4615 struct kvm_one_reg reg;
4616 r = -EFAULT;
4617 if (copy_from_user(&reg, argp, sizeof(reg)))
4618 break;
4619 if (ioctl == KVM_SET_ONE_REG)
4620 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4621 else
4622 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4623 break;
4624 }
Carsten Otte27e03932012-01-04 10:25:21 +01004625#ifdef CONFIG_KVM_S390_UCONTROL
4626 case KVM_S390_UCAS_MAP: {
4627 struct kvm_s390_ucas_mapping ucasmap;
4628
4629 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4630 r = -EFAULT;
4631 break;
4632 }
4633
4634 if (!kvm_is_ucontrol(vcpu->kvm)) {
4635 r = -EINVAL;
4636 break;
4637 }
4638
4639 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4640 ucasmap.vcpu_addr, ucasmap.length);
4641 break;
4642 }
4643 case KVM_S390_UCAS_UNMAP: {
4644 struct kvm_s390_ucas_mapping ucasmap;
4645
4646 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4647 r = -EFAULT;
4648 break;
4649 }
4650
4651 if (!kvm_is_ucontrol(vcpu->kvm)) {
4652 r = -EINVAL;
4653 break;
4654 }
4655
4656 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4657 ucasmap.length);
4658 break;
4659 }
4660#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004661 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004662 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004663 break;
4664 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004665 case KVM_ENABLE_CAP:
4666 {
4667 struct kvm_enable_cap cap;
4668 r = -EFAULT;
4669 if (copy_from_user(&cap, argp, sizeof(cap)))
4670 break;
4671 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4672 break;
4673 }
Thomas Huth41408c282015-02-06 15:01:21 +01004674 case KVM_S390_MEM_OP: {
4675 struct kvm_s390_mem_op mem_op;
4676
4677 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4678 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4679 else
4680 r = -EFAULT;
4681 break;
4682 }
Jens Freimann816c7662014-11-24 17:13:46 +01004683 case KVM_S390_SET_IRQ_STATE: {
4684 struct kvm_s390_irq_state irq_state;
4685
4686 r = -EFAULT;
4687 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4688 break;
4689 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4690 irq_state.len == 0 ||
4691 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4692 r = -EINVAL;
4693 break;
4694 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004695 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004696 r = kvm_s390_set_irq_state(vcpu,
4697 (void __user *) irq_state.buf,
4698 irq_state.len);
4699 break;
4700 }
4701 case KVM_S390_GET_IRQ_STATE: {
4702 struct kvm_s390_irq_state irq_state;
4703
4704 r = -EFAULT;
4705 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4706 break;
4707 if (irq_state.len == 0) {
4708 r = -EINVAL;
4709 break;
4710 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004711 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004712 r = kvm_s390_get_irq_state(vcpu,
4713 (__u8 __user *) irq_state.buf,
4714 irq_state.len);
4715 break;
4716 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004717 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004718 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004719 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004720
4721 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004722 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004723}
4724
Souptick Joarder1499fa82018-04-19 00:49:58 +05304725vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004726{
4727#ifdef CONFIG_KVM_S390_UCONTROL
4728 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4729 && (kvm_is_ucontrol(vcpu->kvm))) {
4730 vmf->page = virt_to_page(vcpu->arch.sie_block);
4731 get_page(vmf->page);
4732 return 0;
4733 }
4734#endif
4735 return VM_FAULT_SIGBUS;
4736}
4737
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05304738int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4739 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09004740{
4741 return 0;
4742}
4743
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004744/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004745int kvm_arch_prepare_memory_region(struct kvm *kvm,
4746 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004747 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004748 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004749{
Nick Wangdd2887e2013-03-25 17:22:57 +01004750	/* A few sanity checks. Memory slots must start and end on a segment
4751	   boundary (1 MB). The memory in userland may be fragmented across
4752	   several different vmas, and it is okay to mmap() and munmap() in
4753	   this slot at any time after doing this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004754
Carsten Otte598841c2011-07-24 10:48:21 +02004755 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004756 return -EINVAL;
4757
Carsten Otte598841c2011-07-24 10:48:21 +02004758 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004759 return -EINVAL;
4760
Dominik Dingela3a92c32014-12-01 17:24:42 +01004761 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4762 return -EINVAL;
4763
Janosch Frank29b40f12019-09-30 04:19:18 -04004764 /* When we are protected, we should not change the memory slots */
4765 if (kvm_s390_pv_get_handle(kvm))
4766 return -EINVAL;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004767 return 0;
4768}
4769
4770void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004771 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004772 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004773 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004774 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004775{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004776 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004777
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004778 switch (change) {
4779 case KVM_MR_DELETE:
4780 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4781 old->npages * PAGE_SIZE);
4782 break;
4783 case KVM_MR_MOVE:
4784 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4785 old->npages * PAGE_SIZE);
4786 if (rc)
4787 break;
4788 /* FALLTHROUGH */
4789 case KVM_MR_CREATE:
4790 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4791 mem->guest_phys_addr, mem->memory_size);
4792 break;
4793 case KVM_MR_FLAGS_ONLY:
4794 break;
4795 default:
4796 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4797 }
Carsten Otte598841c2011-07-24 10:48:21 +02004798 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004799 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004800 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004801}
4802
Alexander Yarygin60a37702016-04-01 15:38:57 +03004803static inline unsigned long nonhyp_mask(int i)
4804{
4805 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4806
4807 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4808}
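/*
 * Spelled out, the helper above extracts the 2-bit field for facility
 * double word i from the top of sclp.hmfai and uses it to decide how
 * many of the low-order bits of that word remain visible: codes 0..3
 * keep 48, 32, 16 or 0 bits respectively, e.g. code 1 yields
 * 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL.
 */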
4809
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004810void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4811{
4812 vcpu->valid_wakeup = false;
4813}
4814
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004815static int __init kvm_s390_init(void)
4816{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004817 int i;
4818
David Hildenbrand07197fd2015-01-30 16:01:38 +01004819 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004820 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004821 return -ENODEV;
4822 }
4823
Janosch Franka4499382018-07-13 11:28:31 +01004824 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004825 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004826 return -EINVAL;
4827 }
4828
Alexander Yarygin60a37702016-04-01 15:38:57 +03004829 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004830 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004831 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4832
Michael Mueller9d8d5782015-02-02 15:42:51 +01004833 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004834}
4835
4836static void __exit kvm_s390_exit(void)
4837{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004838 kvm_exit();
4839}
4840
4841module_init(kvm_s390_init);
4842module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004843
4844/*
4845 * Enable autoloading of the kvm module.
4846 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4847 * since x86 takes a different approach.
4848 */
4849#include <linux/miscdevice.h>
4850MODULE_ALIAS_MISCDEV(KVM_MINOR);
4851MODULE_ALIAS("devname:kvm");